mirror of
https://github.com/bringout/oca-ocb-core.git
synced 2026-04-21 14:32:04 +02:00
Initial commit: Core packages
This commit is contained in:
commit
12c29a983b
9512 changed files with 8379910 additions and 0 deletions
34
odoo-bringout-oca-ocb-base/odoo/tools/__init__.py
Normal file
34
odoo-bringout-oca-ocb-base/odoo/tools/__init__.py
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
SUPPORTED_DEBUGGER = {'pdb', 'ipdb', 'wdb', 'pudb'}
|
||||
from . import _monkeypatches
|
||||
from . import _monkeypatches_pytz
|
||||
|
||||
from werkzeug import urls
|
||||
if not hasattr(urls, 'url_join'):
|
||||
# see https://github.com/pallets/werkzeug/compare/2.3.0..3.0.0
|
||||
# see https://github.com/pallets/werkzeug/blob/2.3.0/src/werkzeug/urls.py for replacement
|
||||
from . import _monkeypatches_urls
|
||||
|
||||
from . import appdirs
|
||||
from . import cloc
|
||||
from . import pdf
|
||||
from . import pycompat
|
||||
from . import win32
|
||||
from .barcode import *
|
||||
from .config import config
|
||||
from .date_utils import *
|
||||
from .float_utils import *
|
||||
from .func import *
|
||||
from .image import *
|
||||
from .mail import *
|
||||
from .misc import *
|
||||
from .query import Query, _generate_table_alias
|
||||
from .sql import *
|
||||
from .template_inheritance import *
|
||||
from .translate import *
|
||||
from .xml_utils import *
|
||||
from .convert import *
|
||||
from . import osutil
|
||||
from .js_transpiler import transpile_javascript, is_odoo_module, URL_RE, ODOO_MODULE_RE
|
||||
from .sourcemap_generator import SourceMapGenerator
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
167
odoo-bringout-oca-ocb-base/odoo/tools/_monkeypatches.py
Normal file
167
odoo-bringout-oca-ocb-base/odoo/tools/_monkeypatches.py
Normal file
|
|
@ -0,0 +1,167 @@
|
|||
import ast
|
||||
import os
|
||||
import logging
|
||||
from email._policybase import _PolicyBase
|
||||
from odoo import MIN_PY_VERSION
|
||||
from shutil import copyfileobj
|
||||
from types import CodeType
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
import num2words
|
||||
from .num2words_patch import Num2Word_AR_Fixed
|
||||
except ImportError:
|
||||
_logger.warning("num2words is not available, Arabic number to words conversion will not work")
|
||||
num2words = None
|
||||
|
||||
from urllib3 import PoolManager
|
||||
from werkzeug.datastructures import FileStorage, MultiDict
|
||||
from werkzeug.routing import Rule
|
||||
from werkzeug.wrappers import Request, Response
|
||||
|
||||
from .json import scriptsafe
|
||||
|
||||
try:
|
||||
from stdnum import util
|
||||
except ImportError:
|
||||
util = None
|
||||
|
||||
try:
    from xlrd import xlsx
except ImportError:
    pass
else:
    from lxml import etree
    # xlrd.xlsx prefers defusedxml, but defusedxml's etree interface is
    # broken (it lacks ElementTree, hence ElementTree.iter), which makes
    # xlrd fall back to Element.getiterator() -- a warning before Python
    # 3.9 and an error from 3.9 onwards.
    #
    # defusedxml is only installed because zeep hard-depends on it and
    # will not drop it (mvantellingen/python-zeep#1014), so bypass the
    # detection entirely and wire in lxml, which is a hard dependency.
    xlsx.ET = etree
    xlsx.ET_has_iterparse = True
    xlsx.Element_has_iter = True
|
||||
|
||||
def _filestorage_save(self, dst, buffer_size=1 << 20):
    """Stream the uploaded file into ``dst`` using 1 MiB chunks.

    Replaces werkzeug's ``FileStorage.save``, whose default chunk size
    is much smaller and therefore slower for large uploads.
    """
    return copyfileobj(self.stream, dst, buffer_size)


FileStorage.save = _filestorage_save


def _multidict_deepcopy(self, memo=None):
    # Accept the ``memo`` argument of the copy.deepcopy protocol but
    # ignore it, delegating to the pre-patch implementation.
    return orig_deepcopy(self)


orig_deepcopy = MultiDict.deepcopy
MultiDict.deepcopy = _multidict_deepcopy
|
||||
|
||||
Request.json_module = Response.json_module = scriptsafe
|
||||
|
||||
get_func_code = getattr(Rule, '_get_func_code', None)
if get_func_code:
    # Harden werkzeug's rule compilation (only present on werkzeug
    # versions that expose Rule._get_func_code): refuse anything that is
    # not a genuine code object before handing it to the original builder.
    def _get_func_code(code, name):
        assert isinstance(code, CodeType)
        return get_func_code(code, name)

    Rule._get_func_code = staticmethod(_get_func_code)
|
||||
|
||||
orig_literal_eval = ast.literal_eval


def literal_eval(expr):
    """Drop-in replacement for :func:`ast.literal_eval` with a size cap.

    Parsing a very large expression can crash the interpreter with a
    segmentation fault, so string inputs longer than the limit are
    rejected.  The limit defaults to 100 KiB and can be overridden with
    the ``ODOO_LIMIT_LITEVAL_BUFFER`` environment variable.

    :param expr: the expression (string or AST node) to evaluate
    :raises ValueError: when a string input exceeds the configured limit
    """
    limit = 102400  # 100 KiB default
    configured = os.getenv("ODOO_LIMIT_LITEVAL_BUFFER")
    if configured:
        if configured.isdigit():
            limit = int(configured)
        else:
            _logger.error("ODOO_LIMIT_LITEVAL_BUFFER has to be an integer, defaulting to 100KiB")

    if isinstance(expr, str) and len(expr) > limit:
        raise ValueError("expression can't exceed buffer limit")

    return orig_literal_eval(expr)


ast.literal_eval = literal_eval
|
||||
|
||||
# Fail loudly once the minimum supported Python reaches 3.12 so this
# patch actually gets removed instead of lingering forever.
if MIN_PY_VERSION >= (3, 12):
    raise RuntimeError("The num2words monkey patch is obsolete. Bump the version of the library to the latest available in the official package repository, if it hasn't already been done, and remove the patch.")

# Replace the stock Arabic converter with the fixed implementation
# (num2words may be None when the library is not installed -- see the
# guarded import above).
if num2words:
    num2words.CONVERTER_CLASSES["ar"] = Num2Word_AR_Fixed()
|
||||
|
||||
_soap_clients = {}
|
||||
|
||||
|
||||
def new_get_soap_client(wsdlurl, timeout=30):
|
||||
# stdnum library does not set the timeout for the zeep Transport class correctly
|
||||
# (timeout is to fetch the wsdl and operation_timeout is to perform the call),
|
||||
# requiring us to monkey patch the get_soap_client function.
|
||||
# Can be removed when https://github.com/arthurdejong/python-stdnum/issues/444 is
|
||||
# resolved and the version of the dependency is updated.
|
||||
# The code is a copy of the original apart for the line related to the Transport class.
|
||||
# This was done to keep the code as similar to the original and to reduce the possibility
|
||||
# of introducing import errors, even though some imports are not in the requirements.
|
||||
# See https://github.com/odoo/odoo/pull/173359 for a more thorough explanation.
|
||||
if (wsdlurl, timeout) not in _soap_clients:
|
||||
try:
|
||||
from zeep.transports import Transport
|
||||
transport = Transport(operation_timeout=timeout, timeout=timeout) # operational_timeout added here
|
||||
from zeep import CachingClient
|
||||
client = CachingClient(wsdlurl, transport=transport).service
|
||||
except ImportError:
|
||||
# fall back to non-caching zeep client
|
||||
try:
|
||||
from zeep import Client
|
||||
client = Client(wsdlurl, transport=transport).service
|
||||
except ImportError:
|
||||
# other implementations require passing the proxy config
|
||||
try:
|
||||
from urllib import getproxies
|
||||
except ImportError:
|
||||
from urllib.request import getproxies
|
||||
# fall back to suds
|
||||
try:
|
||||
from suds.client import Client
|
||||
client = Client(
|
||||
wsdlurl, proxy=getproxies(), timeout=timeout).service
|
||||
except ImportError:
|
||||
# use pysimplesoap as last resort
|
||||
try:
|
||||
from pysimplesoap.client import SoapClient
|
||||
client = SoapClient(
|
||||
wsdl=wsdlurl, proxy=getproxies(), timeout=timeout)
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
'No SOAP library (such as zeep) found')
|
||||
_soap_clients[(wsdlurl, timeout)] = client
|
||||
return _soap_clients[(wsdlurl, timeout)]
|
||||
|
||||
|
||||
# ``util`` is None when python-stdnum could not be imported above; only
# install the patched factory when there is something to patch.
if util:
    util.get_soap_client = new_get_soap_client
|
||||
|
||||
|
||||
def pool_init(self, *args, **kwargs):
    """Patched ``PoolManager.__init__``.

    After the original initialisation, replace the scheme->pool-class
    mapping with a per-instance copy so that customising it on one
    manager cannot leak into the class-level dict shared by all others.
    """
    orig_pool_init(self, *args, **kwargs)
    self.pool_classes_by_scheme = dict(self.pool_classes_by_scheme)


orig_pool_init = PoolManager.__init__
PoolManager.__init__ = pool_init
|
||||
|
||||
|
||||
def policy_clone(self, **kwargs):
    """Hardened ``email._policybase._PolicyBase.clone``.

    Reject attribute names that start with an underscore or contain a
    double underscore before delegating to the original implementation,
    so a clone call can never be used to set private/dunder attributes.

    :raises AttributeError: for any private or dunder attribute name
    """
    offending = next(
        (name for name in kwargs if name.startswith("_") or "__" in name),
        None,
    )
    if offending is not None:
        raise AttributeError(f"{self.__class__.__name__!r} object has no attribute {offending!r}")
    return orig_policy_clone(self, **kwargs)


orig_policy_clone = _PolicyBase.clone
_PolicyBase.clone = policy_clone
|
||||
132
odoo-bringout-oca-ocb-base/odoo/tools/_monkeypatches_pytz.py
Normal file
132
odoo-bringout-oca-ocb-base/odoo/tools/_monkeypatches_pytz.py
Normal file
|
|
@ -0,0 +1,132 @@
|
|||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
"""
|
||||
In ubuntu noble, some timezone where removed leading to errors when trying to assign/access them.
|
||||
|
||||
This was partially fixed in the code by removing all references to old timezones but one issue remains:
|
||||
if a database contains timezones that are not defined in the os, the resolution will fail and break
|
||||
at runtime.
|
||||
|
||||
This patches proposes to alter timezone to fallback on the new canonical timezone if the timezone was removed.
|
||||
|
||||
This list was generated by checking all symlink in /usr/share/zoneinfo in ubuntu 22.04 that disapeared in ubuntu 24.04
|
||||
|
||||
This solutions will work when moving a database from one server to another, even without migration.
|
||||
This list could be improved for other purposes.
|
||||
|
||||
"""
|
||||
|
||||
import pytz
|
||||
|
||||
_tz_mapping = {
|
||||
"Africa/Asmera": "Africa/Nairobi",
|
||||
"America/Argentina/ComodRivadavia": "America/Argentina/Catamarca",
|
||||
"America/Buenos_Aires": "America/Argentina/Buenos_Aires",
|
||||
"America/Cordoba": "America/Argentina/Cordoba",
|
||||
"America/Fort_Wayne": "America/Indiana/Indianapolis",
|
||||
"America/Indianapolis": "America/Indiana/Indianapolis",
|
||||
"America/Jujuy": "America/Argentina/Jujuy",
|
||||
"America/Knox_IN": "America/Indiana/Knox",
|
||||
"America/Louisville": "America/Kentucky/Louisville",
|
||||
"America/Mendoza": "America/Argentina/Mendoza",
|
||||
"America/Rosario": "America/Argentina/Cordoba",
|
||||
"Antarctica/South_Pole": "Pacific/Auckland",
|
||||
"Asia/Ashkhabad": "Asia/Ashgabat",
|
||||
"Asia/Calcutta": "Asia/Kolkata",
|
||||
"Asia/Chungking": "Asia/Shanghai",
|
||||
"Asia/Dacca": "Asia/Dhaka",
|
||||
"Asia/Katmandu": "Asia/Kathmandu",
|
||||
"Asia/Macao": "Asia/Macau",
|
||||
"Asia/Rangoon": "Asia/Yangon",
|
||||
"Asia/Saigon": "Asia/Ho_Chi_Minh",
|
||||
"Asia/Thimbu": "Asia/Thimphu",
|
||||
"Asia/Ujung_Pandang": "Asia/Makassar",
|
||||
"Asia/Ulan_Bator": "Asia/Ulaanbaatar",
|
||||
"Atlantic/Faeroe": "Atlantic/Faroe",
|
||||
"Australia/ACT": "Australia/Sydney",
|
||||
"Australia/LHI": "Australia/Lord_Howe",
|
||||
"Australia/North": "Australia/Darwin",
|
||||
"Australia/NSW": "Australia/Sydney",
|
||||
"Australia/Queensland": "Australia/Brisbane",
|
||||
"Australia/South": "Australia/Adelaide",
|
||||
"Australia/Tasmania": "Australia/Hobart",
|
||||
"Australia/Victoria": "Australia/Melbourne",
|
||||
"Australia/West": "Australia/Perth",
|
||||
"Brazil/Acre": "America/Rio_Branco",
|
||||
"Brazil/DeNoronha": "America/Noronha",
|
||||
"Brazil/East": "America/Sao_Paulo",
|
||||
"Brazil/West": "America/Manaus",
|
||||
"Canada/Atlantic": "America/Halifax",
|
||||
"Canada/Central": "America/Winnipeg",
|
||||
"Canada/Eastern": "America/Toronto",
|
||||
"Canada/Mountain": "America/Edmonton",
|
||||
"Canada/Newfoundland": "America/St_Johns",
|
||||
"Canada/Pacific": "America/Vancouver",
|
||||
"Canada/Saskatchewan": "America/Regina",
|
||||
"Canada/Yukon": "America/Whitehorse",
|
||||
"Chile/Continental": "America/Santiago",
|
||||
"Chile/EasterIsland": "Pacific/Easter",
|
||||
"Cuba": "America/Havana",
|
||||
"Egypt": "Africa/Cairo",
|
||||
"Eire": "Europe/Dublin",
|
||||
"Europe/Kiev": "Europe/Kyiv",
|
||||
"Europe/Uzhgorod": "Europe/Kyiv",
|
||||
"Europe/Zaporozhye": "Europe/Kyiv",
|
||||
"GB": "Europe/London",
|
||||
"GB-Eire": "Europe/London",
|
||||
"GMT+0": "Etc/GMT",
|
||||
"GMT-0": "Etc/GMT",
|
||||
"GMT0": "Etc/GMT",
|
||||
"Greenwich": "Etc/GMT",
|
||||
"Hongkong": "Asia/Hong_Kong",
|
||||
"Iceland": "Africa/Abidjan",
|
||||
"Iran": "Asia/Tehran",
|
||||
"Israel": "Asia/Jerusalem",
|
||||
"Jamaica": "America/Jamaica",
|
||||
"Japan": "Asia/Tokyo",
|
||||
"Kwajalein": "Pacific/Kwajalein",
|
||||
"Libya": "Africa/Tripoli",
|
||||
"Mexico/BajaNorte": "America/Tijuana",
|
||||
"Mexico/BajaSur": "America/Mazatlan",
|
||||
"Mexico/General": "America/Mexico_City",
|
||||
"Navajo": "America/Denver",
|
||||
"NZ": "Pacific/Auckland",
|
||||
"NZ-CHAT": "Pacific/Chatham",
|
||||
"Pacific/Enderbury": "Pacific/Kanton",
|
||||
"Pacific/Ponape": "Pacific/Guadalcanal",
|
||||
"Pacific/Truk": "Pacific/Port_Moresby",
|
||||
"Poland": "Europe/Warsaw",
|
||||
"Portugal": "Europe/Lisbon",
|
||||
"PRC": "Asia/Shanghai",
|
||||
"ROC": "Asia/Taipei",
|
||||
"ROK": "Asia/Seoul",
|
||||
"Singapore": "Asia/Singapore",
|
||||
"Türkiye": "Europe/Istanbul",
|
||||
"UCT": "Etc/UTC",
|
||||
"Universal": "Etc/UTC",
|
||||
"US/Alaska": "America/Anchorage",
|
||||
"US/Aleutian": "America/Adak",
|
||||
"US/Arizona": "America/Phoenix",
|
||||
"US/Central": "America/Chicago",
|
||||
"US/Eastern": "America/New_York",
|
||||
"US/East-Indiana": "America/Indiana/Indianapolis",
|
||||
"US/Hawaii": "Pacific/Honolulu",
|
||||
"US/Indiana-Starke": "America/Indiana/Knox",
|
||||
"US/Michigan": "America/Detroit",
|
||||
"US/Mountain": "America/Denver",
|
||||
"US/Pacific": "America/Los_Angeles",
|
||||
"US/Samoa": "Pacific/Pago_Pago",
|
||||
"W-SU": "Europe/Moscow",
|
||||
"Zulu": "Etc/UTC",
|
||||
}
|
||||
|
||||
|
||||
original_pytz_timezone = pytz.timezone


def timezone(name):
    """Resolve ``name`` via pytz, translating removed legacy aliases.

    When the requested zone is unknown to the installed tzdata but has
    an entry in ``_tz_mapping``, resolve the canonical replacement
    instead; otherwise behave exactly like ``pytz.timezone``.
    """
    if name not in pytz.all_timezones_set:
        name = _tz_mapping.get(name, name)
    return original_pytz_timezone(name)


pytz.timezone = timezone
|
||||
1045
odoo-bringout-oca-ocb-base/odoo/tools/_monkeypatches_urls.py
Normal file
1045
odoo-bringout-oca-ocb-base/odoo/tools/_monkeypatches_urls.py
Normal file
File diff suppressed because it is too large
Load diff
Binary file not shown.
Binary file not shown.
Binary file not shown.
223
odoo-bringout-oca-ocb-base/odoo/tools/_vendor/send_file.py
Normal file
223
odoo-bringout-oca-ocb-base/odoo/tools/_vendor/send_file.py
Normal file
|
|
@ -0,0 +1,223 @@
|
|||
"""
|
||||
Vendored copy of the werkzeug.utils.send_file function defined in
|
||||
werkzeug2 which is packaged in Debian 12 "Bookworm" and Ubuntu 22.04
|
||||
"Jammy". Odoo is compatible with werkzeug2 since saas-15.4.
|
||||
|
||||
This vendored copy is deprecated, only present to ensure backward
|
||||
compatibility with older operating systems.
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
|
||||
import io
|
||||
import logging
|
||||
import mimetypes
|
||||
import os
|
||||
import typing as t
|
||||
import unicodedata
|
||||
from datetime import datetime
|
||||
from time import time
|
||||
from zlib import adler32
|
||||
|
||||
from werkzeug.datastructures import Headers
|
||||
from werkzeug.exceptions import RequestedRangeNotSatisfiable
|
||||
from werkzeug.urls import url_quote
|
||||
from werkzeug.wrappers import Response
|
||||
from werkzeug.wsgi import wrap_file
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def send_file(
    path_or_file: t.Union[os.PathLike, str, t.IO[bytes]],
    environ: "WSGIEnvironment",
    mimetype: t.Optional[str] = None,
    as_attachment: bool = False,
    download_name: t.Optional[str] = None,
    conditional: bool = True,
    etag: t.Union[bool, str] = True,
    last_modified: t.Optional[t.Union[datetime, int, float]] = None,
    max_age: t.Optional[
        t.Union[int, t.Callable[[t.Optional[str]], t.Optional[int]]]
    ] = None,
    use_x_sendfile: bool = False,
    response_class: t.Optional[t.Type["Response"]] = None,
    _root_path: t.Optional[t.Union[os.PathLike, str]] = None,
) -> "Response":
    """Send the contents of a file to the client.

    The first argument can be a file path or a file-like object. Paths
    are preferred in most cases because Werkzeug can manage the file and
    get extra information from the path. Passing a file-like object
    requires that the file is opened in binary mode, and is mostly
    useful when building a file in memory with :class:`io.BytesIO`.

    Never pass file paths provided by a user. The path is assumed to be
    trusted, so a user could craft a path to access a file you didn't
    intend.

    If the WSGI server sets a ``file_wrapper`` in ``environ``, it is
    used, otherwise Werkzeug's built-in wrapper is used. Alternatively,
    if the HTTP server supports ``X-Sendfile``, ``use_x_sendfile=True``
    will tell the server to send the given path, which is much more
    efficient than reading it in Python.

    :param path_or_file: The path to the file to send, relative to the
        current working directory if a relative path is given.
        Alternatively, a file-like object opened in binary mode. Make
        sure the file pointer is seeked to the start of the data.
    :param environ: The WSGI environ for the current request.
    :param mimetype: The MIME type to send for the file. If not
        provided, it will try to detect it from the file name.
    :param as_attachment: Indicate to a browser that it should offer to
        save the file instead of displaying it.
    :param download_name: The default name browsers will use when saving
        the file. Defaults to the passed file name.
    :param conditional: Enable conditional and range responses based on
        request headers. Requires passing a file path and ``environ``.
    :param etag: Calculate an ETag for the file, which requires passing
        a file path. Can also be a string to use instead.
    :param last_modified: The last modified time to send for the file,
        in seconds. If not provided, it will try to detect it from the
        file path.
    :param max_age: How long the client should cache the file, in
        seconds. If set, ``Cache-Control`` will be ``public``, otherwise
        it will be ``no-cache`` to prefer conditional caching.
    :param use_x_sendfile: Set the ``X-Sendfile`` header to let the
        server to efficiently send the file. Requires support from the
        HTTP server. Requires passing a file path.
    :param response_class: Build the response using this class. Defaults
        to :class:`~werkzeug.wrappers.Response`.
    :param _root_path: Do not use. For internal use only. Use
        :func:`send_from_directory` to safely send files under a path.
    """
    if response_class is None:
        response_class = Response

    path = None
    file = None
    size = None
    mtime = None
    headers = Headers()

    if isinstance(path_or_file, (os.PathLike, str)) or hasattr(
        path_or_file, "__fspath__"
    ):
        # Flask will pass app.root_path, allowing its send_file wrapper
        # to not have to deal with paths.
        if _root_path is not None:
            path = os.path.join(_root_path, path_or_file)
        else:
            path = os.path.abspath(path_or_file)

        stat = os.stat(path)
        size = stat.st_size
        mtime = stat.st_mtime
    else:
        file = path_or_file

    if download_name is None and path is not None:
        download_name = os.path.basename(path)

    if mimetype is None:
        if download_name is None:
            raise TypeError(
                "Unable to detect the MIME type because a file name is"
                " not available. Either set 'download_name', pass a"
                " path instead of a file, or set 'mimetype'."
            )

        mimetype, encoding = mimetypes.guess_type(download_name)

        if mimetype is None:
            mimetype = "application/octet-stream"

        # Don't send encoding for attachments; it causes browsers to
        # save decompressed tar.gz files.
        if encoding is not None and not as_attachment:
            headers.set("Content-Encoding", encoding)
        # BUG FIX: a previous revision also copied ``encoding`` into an
        # "X-Accel-Charset" header when use_x_sendfile was enabled.
        # ``encoding`` here is a transfer encoding such as "gzip", not a
        # character set, and upstream werkzeug's send_file sets no such
        # header -- the line was removed.

    if download_name is not None:
        try:
            download_name.encode("ascii")
        except UnicodeEncodeError:
            # RFC 6266/5987: ship an ASCII-safe fallback plus the
            # UTF-8-encoded original under filename*.
            simple = unicodedata.normalize("NFKD", download_name)
            simple = simple.encode("ascii", "ignore").decode("ascii")
            quoted = url_quote(download_name, safe="")
            names = {"filename": simple, "filename*": f"UTF-8''{quoted}"}
        else:
            names = {"filename": download_name}

        value = "attachment" if as_attachment else "inline"
        headers.set("Content-Disposition", value, **names)
    elif as_attachment:
        raise TypeError(
            "No name provided for attachment. Either set"
            " 'download_name' or pass a path instead of a file."
        )

    if use_x_sendfile and path is not None:
        headers["X-Sendfile"] = path
        data = None
    else:
        if file is None:
            file = open(path, "rb")  # type: ignore
        elif isinstance(file, io.BytesIO):
            size = file.getbuffer().nbytes
        elif isinstance(file, io.TextIOBase):
            raise ValueError("Files must be opened in binary mode or use BytesIO.")

        data = wrap_file(environ, file)

    rv = response_class(
        data, mimetype=mimetype, headers=headers, direct_passthrough=True
    )

    if size is not None:
        rv.content_length = size

    if last_modified is not None:
        rv.last_modified = last_modified  # type: ignore
    elif mtime is not None:
        rv.last_modified = mtime  # type: ignore

    rv.cache_control.no_cache = True

    # Flask will pass app.get_send_file_max_age, allowing its send_file
    # wrapper to not have to deal with paths.
    if callable(max_age):
        max_age = max_age(path)

    if max_age is not None:
        if max_age > 0:
            rv.cache_control.no_cache = None
            rv.cache_control.public = True

        rv.cache_control.max_age = max_age
        rv.expires = int(time() + max_age)  # type: ignore

    if isinstance(etag, str):
        rv.set_etag(etag)
    elif etag and path is not None:
        # Weak content fingerprint: mtime + size + checksum of the path.
        check = adler32(path.encode("utf-8")) & 0xFFFFFFFF
        rv.set_etag(f"{mtime}-{size}-{check}")

    if conditional:
        try:
            rv = rv.make_conditional(environ, accept_ranges=True, complete_length=size)
        except RequestedRangeNotSatisfiable:
            if file is not None:
                file.close()

            raise

        # Some x-sendfile implementations incorrectly ignore the 304
        # status code and send the file anyway.
        if rv.status_code == 304:
            rv.headers.pop("x-sendfile", None)

    return rv
|
||||
250
odoo-bringout-oca-ocb-base/odoo/tools/_vendor/sessions.py
Normal file
250
odoo-bringout-oca-ocb-base/odoo/tools/_vendor/sessions.py
Normal file
|
|
@ -0,0 +1,250 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
r"""
|
||||
Vendored copy of https://github.com/pallets/werkzeug/blob/2b2c4c3dd3cf7389e9f4aa06371b7332257c6289/src/werkzeug/contrib/sessions.py
|
||||
|
||||
werkzeug.contrib was removed from werkzeug 1.0. sessions (and secure
|
||||
cookies) were moved to the secure-cookies package. Problem is distros
|
||||
are starting to update werkzeug to 1.0 without having secure-cookies
|
||||
(e.g. Arch has done so, Debian has updated python-werkzeug in
|
||||
"experimental"), which will be problematic once that starts trickling
|
||||
down onto more stable distros and people start deploying that.
|
||||
|
||||
Edited some to fix imports and remove some compatibility things
|
||||
(mostly PY2) and the unnecessary (to us) SessionMiddleware
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
from hashlib import sha1
|
||||
from os import path, replace as rename
|
||||
from odoo.tools.misc import pickle
|
||||
from time import time
|
||||
|
||||
from werkzeug.datastructures import CallbackDict
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
_sha1_re = re.compile(r"^[a-f0-9]{40}$")
|
||||
|
||||
|
||||
def generate_key(salt=None):
    """Return a random 40-character lowercase-hex session key.

    :param salt: optional salt mixed into the hash; ``bytes`` or ``str``
        (a str is encoded as UTF-8).  Defaults to the ASCII bytes of
        ``repr(None)``.
    """
    if salt is None:
        salt = repr(salt).encode("ascii")
    elif isinstance(salt, str):
        # bug fix: the b"".join below requires bytes -- a str salt used
        # to raise TypeError.
        salt = salt.encode("utf-8")
    return sha1(b"".join([salt, str(time()).encode("ascii"), os.urandom(30)])).hexdigest()
|
||||
|
||||
|
||||
class ModificationTrackingDict(CallbackDict):
    """CallbackDict flavour that flips ``modified`` on any write."""

    __slots__ = ("modified", "on_update")

    def __init__(self, *args, **kwargs):
        def on_update(self):
            self.modified = True

        self.modified = False
        CallbackDict.__init__(self, on_update=on_update)
        # populate through dict.update so the initial fill does not
        # count as a modification
        dict.update(self, *args, **kwargs)

    def copy(self):
        """Create a flat copy of the dict."""
        sentinel = object()
        clone = object.__new__(self.__class__)
        # NOTE(review): only the slot values are duplicated here; the
        # mapping contents themselves are not copied by this loop --
        # kept as-is from the vendored original, confirm before relying
        # on copy() for the items.
        for slot in self.__slots__:
            value = getattr(self, slot, sentinel)
            if value is not sentinel:
                setattr(clone, slot, value)
        return clone

    def __copy__(self):
        return self.copy()
|
||||
|
||||
|
||||
class Session(ModificationTrackingDict):
    """Subclass of a dict that keeps track of direct object changes.

    Changes in mutable structures are not tracked; for those you have
    to set ``modified`` to ``True`` by hand.
    """

    __slots__ = ModificationTrackingDict.__slots__ + ("sid", "new")

    def __init__(self, data, sid, new=False):
        ModificationTrackingDict.__init__(self, data)
        self.sid = sid
        self.new = new

    def __repr__(self):
        flag = "*" if self.should_save else ""
        return "<%s %s%s>" % (
            self.__class__.__name__,
            dict.__repr__(self),
            flag,
        )

    @property
    def should_save(self):
        """True if the session should be saved.

        .. versionchanged:: 0.6
            By default the session is now only saved if the session is
            modified, not if it is new like it was before.
        """
        return self.modified
|
||||
|
||||
|
||||
class SessionStore(object):
    """Base class for all session stores.

    The Werkzeug contrib module does not implement any useful stores
    besides the filesystem store; application developers are encouraged
    to create their own.

    :param session_class: the session class to use, :class:`Session`
        by default.
    """

    def __init__(self, session_class=None):
        self.session_class = Session if session_class is None else session_class

    def is_valid_key(self, key):
        """Check if a key has the correct format."""
        return _sha1_re.match(key) is not None

    def generate_key(self, salt=None):
        """Simple function that generates a new session key."""
        return generate_key(salt)

    def new(self):
        """Generate a new session."""
        return self.session_class({}, self.generate_key(), True)

    def save(self, session):
        """Save a session."""

    def save_if_modified(self, session):
        """Save if a session class wants an update."""
        if session.should_save:
            self.save(session)

    def delete(self, session):
        """Delete a session."""

    def get(self, sid):
        """Get a session for this sid or a new session object.

        This method has to check if the session key is valid and create
        a new session if that wasn't the case.
        """
        return self.session_class({}, sid, True)
|
||||
|
||||
|
||||
#: used for temporary files by the filesystem session store
|
||||
_fs_transaction_suffix = ".__wz_sess"
|
||||
|
||||
|
||||
class FilesystemSessionStore(SessionStore):
|
||||
"""Simple example session store that saves sessions on the filesystem.
|
||||
This store works best on POSIX systems and Windows Vista / Windows
|
||||
Server 2008 and newer.
|
||||
|
||||
.. versionchanged:: 0.6
|
||||
`renew_missing` was added. Previously this was considered `True`,
|
||||
now the default changed to `False` and it can be explicitly
|
||||
deactivated.
|
||||
|
||||
:param path: the path to the folder used for storing the sessions.
|
||||
If not provided the default temporary directory is used.
|
||||
:param filename_template: a string template used to give the session
|
||||
a filename. ``%s`` is replaced with the
|
||||
session id.
|
||||
:param session_class: The session class to use. Defaults to
|
||||
:class:`Session`.
|
||||
:param renew_missing: set to `True` if you want the store to
|
||||
give the user a new sid if the session was
|
||||
not yet saved.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
path=None,
|
||||
filename_template="werkzeug_%s.sess",
|
||||
session_class=None,
|
||||
renew_missing=False,
|
||||
mode=0o644,
|
||||
):
|
||||
SessionStore.__init__(self, session_class)
|
||||
if path is None:
|
||||
path = tempfile.gettempdir()
|
||||
self.path = path
|
||||
assert not filename_template.endswith(_fs_transaction_suffix), (
|
||||
"filename templates may not end with %s" % _fs_transaction_suffix
|
||||
)
|
||||
self.filename_template = filename_template
|
||||
self.renew_missing = renew_missing
|
||||
self.mode = mode
|
||||
|
||||
def get_session_filename(self, sid):
|
||||
# out of the box, this should be a strict ASCII subset but
|
||||
# you might reconfigure the session object to have a more
|
||||
# arbitrary string.
|
||||
return path.join(self.path, self.filename_template % sid)
|
||||
|
||||
def save(self, session):
|
||||
fn = self.get_session_filename(session.sid)
|
||||
fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path)
|
||||
f = os.fdopen(fd, "wb")
|
||||
try:
|
||||
pickle.dump(dict(session), f, pickle.HIGHEST_PROTOCOL)
|
||||
finally:
|
||||
f.close()
|
||||
try:
|
||||
rename(tmp, fn)
|
||||
os.chmod(fn, self.mode)
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
|
||||
def delete(self, session):
|
||||
fn = self.get_session_filename(session.sid)
|
||||
try:
|
||||
os.unlink(fn)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def get(self, sid):
|
||||
if not self.is_valid_key(sid):
|
||||
return self.new()
|
||||
try:
|
||||
f = open(self.get_session_filename(sid), "rb")
|
||||
except IOError:
|
||||
_logger.debug('Could not load session from disk. Use empty session.', exc_info=True)
|
||||
if self.renew_missing:
|
||||
return self.new()
|
||||
data = {}
|
||||
else:
|
||||
try:
|
||||
try:
|
||||
data = pickle.load(f, errors={})
|
||||
except Exception:
|
||||
_logger.debug('Could not load session data. Use empty session.', exc_info=True)
|
||||
data = {}
|
||||
finally:
|
||||
f.close()
|
||||
return self.session_class(data, sid, False)
|
||||
|
||||
def list(self):
    """Lists all sessions in the store.

    .. versionadded:: 0.6
    """
    prefix, suffix = self.filename_template.split("%s", 1)
    sid_pattern = re.compile(
        r"%s(.{5,})%s$" % (re.escape(prefix), re.escape(suffix))
    )
    sids = []
    for name in os.listdir(self.path):
        # Files with the transaction suffix are still being saved.
        if name.endswith(_fs_transaction_suffix):
            continue
        found = sid_pattern.match(name)
        if found is not None:
            sids.append(found.group(1))
    return sids
|
||||
204
odoo-bringout-oca-ocb-base/odoo/tools/_vendor/useragents.py
Normal file
204
odoo-bringout-oca-ocb-base/odoo/tools/_vendor/useragents.py
Normal file
|
|
@ -0,0 +1,204 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
werkzeug.useragents
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This module provides a helper to inspect user agent strings. This module
|
||||
is far from complete but should work for most of the currently available
|
||||
browsers.
|
||||
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
|
||||
This package was vendored in odoo in order to prevent errors with werkzeug 2.1
|
||||
"""
|
||||
import re
|
||||
|
||||
|
||||
class UserAgentParser(object):
    """A simple user agent parser. Used by the `UserAgent`."""

    # (pattern, canonical name) lookup tables; first match wins.
    platforms = (
        ("cros", "chromeos"),
        ("iphone|ios", "iphone"),
        ("ipad", "ipad"),
        (r"darwin|mac|os\s*x", "macos"),
        ("win", "windows"),
        (r"android", "android"),
        ("netbsd", "netbsd"),
        ("openbsd", "openbsd"),
        ("freebsd", "freebsd"),
        ("dragonfly", "dragonflybsd"),
        ("(sun|i86)os", "solaris"),
        (r"x11|lin(\b|ux)?", "linux"),
        (r"nintendo\s+wii", "wii"),
        ("irix", "irix"),
        ("hp-?ux", "hpux"),
        ("aix", "aix"),
        ("sco|unix_sv", "sco"),
        ("bsd", "bsd"),
        ("amiga", "amiga"),
        ("blackberry|playbook", "blackberry"),
        ("symbian", "symbian"),
    )
    browsers = (
        ("googlebot", "google"),
        ("msnbot", "msn"),
        ("yahoo", "yahoo"),
        ("ask jeeves", "ask"),
        (r"aol|america\s+online\s+browser", "aol"),
        ("opera", "opera"),
        ("edge", "edge"),
        ("chrome|crios", "chrome"),
        ("seamonkey", "seamonkey"),
        ("firefox|firebird|phoenix|iceweasel", "firefox"),
        ("galeon", "galeon"),
        ("safari|version", "safari"),
        ("webkit", "webkit"),
        ("camino", "camino"),
        ("konqueror", "konqueror"),
        ("k-meleon", "kmeleon"),
        ("netscape", "netscape"),
        (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
        ("lynx", "lynx"),
        ("links", "links"),
        ("Baiduspider", "baidu"),
        ("bingbot", "bing"),
        ("mozilla", "mozilla"),
    )

    # Wraps a browser pattern so that an optional version number is captured.
    _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
    _language_re = re.compile(
        r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
        r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
    )

    def __init__(self):
        # Compile every table once per instance, turning each
        # (pattern, name) pair into (name, compiled_regex).
        self.platforms = [
            (name, re.compile(pattern, re.I))
            for pattern, name in self.platforms
        ]
        self.browsers = [
            (name, re.compile(self._browser_version_re % pattern, re.I))
            for pattern, name in self.browsers
        ]

    def __call__(self, user_agent):
        """Parse *user_agent* and return a
        ``(platform, browser, version, language)`` tuple; each element
        is ``None`` when it could not be detected.
        """
        platform = browser = version = language = None
        for name, regex in self.platforms:
            if regex.search(user_agent) is not None:
                platform = name
                break
        for name, regex in self.browsers:
            hit = regex.search(user_agent)
            if hit is not None:
                browser = name
                version = hit.group(1)
                break
        lang_hit = self._language_re.search(user_agent)
        if lang_hit is not None:
            language = lang_hit.group(1) or lang_hit.group(2)
        return platform, browser, version, language
|
||||
|
||||
|
||||
class UserAgent(object):
    """Inspectable wrapper around a raw user agent string.

    Accepts either a WSGI environ (the ``HTTP_USER_AGENT`` value is
    extracted, defaulting to ``""``) or the user agent string itself,
    and exposes the parsed details as attributes:

    ``string``
        the raw user agent string
    ``platform``
        the detected platform name (``windows``, ``linux``, ``macos``,
        ``android``, ... — see ``UserAgentParser.platforms``) or ``None``
    ``browser``
        the detected browser name (``chrome``, ``firefox``, ``safari``,
        crawlers such as ``google``/``bing``, ... — see
        ``UserAgentParser.browsers``) or ``None``
    ``version``
        the browser version, when one could be extracted
    ``language``
        the browser language, when one could be extracted

    An instance is truthy only when a browser was detected.
    """

    # One shared parser for all instances: compiling the regex tables
    # is comparatively expensive, so do it exactly once.
    _parser = UserAgentParser()

    def __init__(self, environ_or_string):
        if isinstance(environ_or_string, dict):
            environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
        self.string = environ_or_string
        parsed = self._parser(environ_or_string)
        self.platform, self.browser, self.version, self.language = parsed

    def to_header(self):
        """Return the value to serialize back into a ``User-Agent`` header."""
        return self.string

    def __str__(self):
        return self.string

    def __nonzero__(self):
        # Python 2 truthiness hook, kept for the vendored code's
        # backward compatibility; aliased to __bool__ below.
        return bool(self.browser)

    __bool__ = __nonzero__

    def __repr__(self):
        return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
|
||||
493
odoo-bringout-oca-ocb-base/odoo/tools/appdirs.py
Normal file
493
odoo-bringout-oca-ocb-base/odoo/tools/appdirs.py
Normal file
|
|
@ -0,0 +1,493 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2005-2010 ActiveState Software Inc.
|
||||
# Copyright (c) 2013 Eddy Petrișor
|
||||
|
||||
"""Utilities for determining application-specific dirs.
|
||||
|
||||
See <http://github.com/ActiveState/appdirs> for details and usage.
|
||||
"""
|
||||
from __future__ import print_function
|
||||
# Dev Notes:
|
||||
# - MSDN on where to store app data files:
|
||||
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
|
||||
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
|
||||
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
|
||||
|
||||
__version_info__ = (1, 3, 0)
|
||||
__version__ = '.'.join(str(v) for v in __version_info__)
|
||||
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
|
||||
r"""Return full path to the user-specific data dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only required and used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"roaming" (boolean, default False) can be set True to use the Windows
|
||||
roaming appdata directory. That means that for users on a Windows
|
||||
network setup for roaming profiles, this user data will be
|
||||
sync'd on login. See
|
||||
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
|
||||
for a discussion of issues.
|
||||
|
||||
Typical user data directories are:
|
||||
Mac OS X: ~/Library/Application Support/<AppName>
|
||||
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
|
||||
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
|
||||
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
|
||||
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
|
||||
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
|
||||
|
||||
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
|
||||
That means, by default "~/.local/share/<AppName>".
|
||||
"""
|
||||
if sys.platform == "win32":
|
||||
if appauthor is None:
|
||||
appauthor = appname
|
||||
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
|
||||
path = os.path.normpath(_get_win_folder(const))
|
||||
if appname:
|
||||
path = os.path.join(path, appauthor, appname)
|
||||
elif sys.platform == 'darwin':
|
||||
path = os.path.expanduser('~/Library/Application Support/')
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
else:
|
||||
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
|
||||
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
|
||||
r"""Return full path to the user-shared data dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only required and used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"multipath" is an optional parameter only applicable to \*nix
|
||||
which indicates that the entire list of data dirs should be
|
||||
returned. By default, the first item from XDG_DATA_DIRS is
|
||||
returned, or :samp:`/usr/local/share/{AppName}`,
|
||||
if ``XDG_DATA_DIRS`` is not set
|
||||
|
||||
Typical user data directories are:
|
||||
|
||||
Mac OS X
|
||||
:samp:`/Library/Application Support/{AppName}`
|
||||
Unix
|
||||
:samp:`/usr/local/share/{AppName}` or :samp:`/usr/share/{AppName}`
|
||||
Win XP
|
||||
:samp:`C:\Documents and Settings\All Users\Application Data\{AppAuthor}\{AppName}`
|
||||
Vista
|
||||
Fail! "C:\ProgramData" is a hidden *system* directory on Vista.
|
||||
Win 7
|
||||
:samp:`C:\ProgramData\{AppAuthor}\{AppName}` (hidden, but writeable on Win 7)
|
||||
|
||||
For Unix, this is using the ``$XDG_DATA_DIRS[0]`` default.
|
||||
|
||||
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
|
||||
"""
|
||||
if sys.platform == "win32":
|
||||
if appauthor is None:
|
||||
appauthor = appname
|
||||
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
|
||||
if appname:
|
||||
path = os.path.join(path, appauthor, appname)
|
||||
elif sys.platform == 'darwin':
|
||||
path = os.path.expanduser('/Library/Application Support')
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
else:
|
||||
# XDG default for $XDG_DATA_DIRS
|
||||
# only first, if multipath is False
|
||||
path = os.getenv('XDG_DATA_DIRS',
|
||||
os.pathsep.join(['/usr/local/share', '/usr/share']))
|
||||
pathlist = [ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep) ]
|
||||
if appname:
|
||||
if version:
|
||||
appname = os.path.join(appname, version)
|
||||
pathlist = [ os.sep.join([x, appname]) for x in pathlist ]
|
||||
|
||||
if multipath:
|
||||
path = os.pathsep.join(pathlist)
|
||||
else:
|
||||
path = pathlist[0]
|
||||
return path
|
||||
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
|
||||
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
|
||||
"""Return full path to the user-specific config dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only required and used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"roaming" (boolean, default False) can be set True to use the Windows
|
||||
roaming appdata directory. That means that for users on a Windows
|
||||
network setup for roaming profiles, this user data will be
|
||||
sync'd on login. See `managing roaming user data
|
||||
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_
|
||||
for a discussion of issues.
|
||||
|
||||
Typical user data directories are:
|
||||
|
||||
Mac OS X
|
||||
same as user_data_dir
|
||||
Unix
|
||||
:samp:`~/.config/{AppName}` or in $XDG_CONFIG_HOME, if defined
|
||||
Win *
|
||||
same as user_data_dir
|
||||
|
||||
For Unix, we follow the XDG spec and support ``$XDG_DATA_HOME``.
|
||||
That means, by default :samp:`~/.local/share/{AppName}`.
|
||||
"""
|
||||
if sys.platform in [ "win32", "darwin" ]:
|
||||
path = user_data_dir(appname, appauthor, None, roaming)
|
||||
else:
|
||||
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
|
||||
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
|
||||
r"""Return full path to the user-shared data dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only required and used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"multipath" is an optional parameter only applicable to \*nix
|
||||
which indicates that the entire list of config dirs should be
|
||||
returned. By default, the first item from ``XDG_CONFIG_DIRS`` is
|
||||
returned, or :samp:`/etc/xdg/{AppName}`, if ``XDG_CONFIG_DIRS`` is not set
|
||||
|
||||
Typical user data directories are:
|
||||
|
||||
Mac OS X
|
||||
same as site_data_dir
|
||||
Unix
|
||||
``/etc/xdg/<AppName>`` or ``$XDG_CONFIG_DIRS[i]/<AppName>`` for each
|
||||
value in ``$XDG_CONFIG_DIRS``
|
||||
Win *
|
||||
same as site_data_dir
|
||||
Vista
|
||||
Fail! "C:\ProgramData" is a hidden *system* directory on Vista.
|
||||
|
||||
For Unix, this is using the ``$XDG_CONFIG_DIRS[0]`` default, if ``multipath=False``
|
||||
|
||||
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
|
||||
"""
|
||||
if sys.platform in [ "win32", "darwin" ]:
|
||||
path = site_data_dir(appname, appauthor)
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
else:
|
||||
# XDG default for $XDG_CONFIG_DIRS
|
||||
# only first, if multipath is False
|
||||
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
|
||||
pathlist = [ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep) ]
|
||||
if appname:
|
||||
if version:
|
||||
appname = os.path.join(appname, version)
|
||||
pathlist = [ os.sep.join([x, appname]) for x in pathlist ]
|
||||
|
||||
if multipath:
|
||||
path = os.pathsep.join(pathlist)
|
||||
else:
|
||||
path = pathlist[0]
|
||||
return path
|
||||
|
||||
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
|
||||
r"""Return full path to the user-specific cache dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only required and used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"opinion" (boolean) can be False to disable the appending of
|
||||
"Cache" to the base app data dir for Windows. See
|
||||
discussion below.
|
||||
|
||||
Typical user cache directories are:
|
||||
|
||||
Mac OS X
|
||||
~/Library/Caches/<AppName>
|
||||
Unix
|
||||
~/.cache/<AppName> (XDG default)
|
||||
Win XP
|
||||
C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
|
||||
Vista
|
||||
C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
|
||||
|
||||
On Windows the only suggestion in the MSDN docs is that local settings go in
|
||||
the ``CSIDL_LOCAL_APPDATA`` directory. This is identical to the non-roaming
|
||||
app data dir (the default returned by ``user_data_dir`` above). Apps typically
|
||||
put cache data somewhere *under* the given dir here. Some examples:
|
||||
|
||||
- ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
|
||||
- ...\Acme\SuperApp\Cache\1.0
|
||||
|
||||
OPINION: This function appends "Cache" to the ``CSIDL_LOCAL_APPDATA`` value.
|
||||
This can be disabled with the ``opinion=False`` option.
|
||||
"""
|
||||
if sys.platform == "win32":
|
||||
if appauthor is None:
|
||||
appauthor = appname
|
||||
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
|
||||
if appname:
|
||||
path = os.path.join(path, appauthor, appname)
|
||||
if opinion:
|
||||
path = os.path.join(path, "Cache")
|
||||
elif sys.platform == 'darwin':
|
||||
path = os.path.expanduser('~/Library/Caches')
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
else:
|
||||
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the per-user log directory for this application.

    :param appname: application name; when None only the base system
        directory is returned.
    :param appauthor: author/company name (Windows only).
    :param version: optional version path element, appended only when
        ``appname`` is given.
    :param opinion: when True (default) append ``Logs`` (Windows) or
        ``log`` (Unix) to the base dir, since neither platform defines
        a dedicated log location.

    Typical results:
        Mac OS X:  ~/Library/Logs/<AppName>
        Unix:      $XDG_CACHE_HOME/<AppName>/log or ~/.cache/<AppName>/log
        Windows:   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
    """
    if sys.platform == "darwin":
        # The version element (if any) is appended by the common tail below.
        directory = os.path.join(
            os.path.expanduser("~/Library/Logs"),
            appname)
    elif sys.platform == "win32":
        # The delegate already consumed ``version``; clear it so the
        # common tail below does not append it a second time.
        directory = user_data_dir(appname, appauthor, version)
        version = False
        if opinion:
            directory = os.path.join(directory, "Logs")
    else:
        # Same pattern as the win32 branch, via the cache dir.
        directory = user_cache_dir(appname, appauthor, version)
        version = False
        if opinion:
            directory = os.path.join(directory, "log")
    if appname and version:
        directory = os.path.join(directory, version)
    return directory
|
||||
|
||||
|
||||
class AppDirs(object):
    """Convenience wrapper bundling the module-level ``*_dir`` helpers.

    Stores the application identity (name, author, version) and the
    platform switches (``roaming``, ``multipath``) once, then exposes
    each directory kind as a read-only property.
    """

    def __init__(self, appname, appauthor=None, version=None,
                 roaming=False, multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        """Per-user data directory."""
        return user_data_dir(self.appname, self.appauthor,
                             version=self.version, roaming=self.roaming)

    @property
    def site_data_dir(self):
        """Site-wide data directory."""
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)

    @property
    def user_config_dir(self):
        """Per-user config directory."""
        return user_config_dir(self.appname, self.appauthor,
                               version=self.version, roaming=self.roaming)

    @property
    def site_config_dir(self):
        """Site-wide config directory.

        NOTE(review): delegates to ``site_data_dir`` (not
        ``site_config_dir``) exactly as the vendored original does;
        preserved as-is for compatibility.
        """
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)

    @property
    def user_cache_dir(self):
        """Per-user cache directory."""
        return user_cache_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_log_dir(self):
        """Per-user log directory."""
        return user_log_dir(self.appname, self.appauthor,
                            version=self.version)
|
||||
|
||||
|
||||
|
||||
|
||||
#---- internal support stuff
|
||||
|
||||
def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import winreg as _winreg

    # Registry value names under the "Shell Folders" key for each
    # supported CSIDL constant.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
    # QueryValueEx returns (value, registry_type); only the path is used.
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
|
||||
|
||||
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL_* folder via the pywin32 shell bindings (Windows only)."""
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = str(dir)

        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except ImportError:
                # win32api missing: keep the long-form path as-is.
                pass
    except UnicodeError:
        pass
    return dir
|
||||
|
||||
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* folder via SHGetFolderPathW through ctypes (Windows only)."""
    import ctypes

    # Numeric CSIDL constants matching the symbolic names used elsewhere
    # in this module.
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value
|
||||
|
||||
# Pick the best available CSIDL resolver on Windows: prefer pywin32,
# fall back to ctypes, then to the registry technique. On all other
# platforms _get_win_folder is simply never defined (and never called
# by the functions above).
if sys.platform == "win32":
    try:
        import win32com.shell
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            import ctypes
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            _get_win_folder = _get_win_folder_from_registry
|
||||
|
||||
|
||||
|
||||
#---- self test code
|
||||
|
||||
if __name__ == "__main__":
    # Manual smoke test: print every directory kind this module can
    # compute, with and without the optional 'version' and 'appauthor'
    # arguments, using the AppDirs convenience wrapper.
    appname = "MyApp"
    appauthor = "MyCompany"

    props = ("user_data_dir", "site_data_dir",
             "user_config_dir", "site_config_dir",
             "user_cache_dir", "user_log_dir")

    print("-- app dirs (with optional 'version')")
    dirs = AppDirs(appname, appauthor, version="1.0")
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))

    print("\n-- app dirs (without optional 'version')")
    dirs = AppDirs(appname, appauthor)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))

    print("\n-- app dirs (without optional 'appauthor')")
    dirs = AppDirs(appname)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))
|
||||
54
odoo-bringout-oca-ocb-base/odoo/tools/barcode.py
Normal file
54
odoo-bringout-oca-ocb-base/odoo/tools/barcode.py
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
import re
|
||||
|
||||
|
||||
def get_barcode_check_digit(numeric_barcode):
    """ Computes and returns the barcode check digit. The used algorithm
    follows the GTIN specifications and can be used by all compatible
    barcode nomenclature, like as EAN-8, EAN-12 (UPC-A) or EAN-13.

    https://www.gs1.org/sites/default/files/docs/barcodes/GS1_General_Specifications.pdf
    https://www.gs1.org/services/how-calculate-check-digit-manually

    :param numeric_barcode: the barcode to verify/recompute the check digit
    :type numeric_barcode: str
    :return: the number corresponding to the right check digit
    :rtype: int
    """
    # GTIN weighting: starting from the digit just before the checksum
    # and reading backwards, positions alternate weights 3, 1, 3, 1, ...
    # Dropping the checksum digit and reversing first makes the
    # weighting independent of the barcode length.
    weight_three_sum = weight_one_sum = 0
    for position, digit in enumerate(numeric_barcode[-2::-1]):
        if position % 2:
            weight_one_sum += int(digit)
        else:
            weight_three_sum += int(digit)
    total = weight_three_sum * 3 + weight_one_sum
    return (10 - total % 10) % 10
|
||||
|
||||
|
||||
def check_barcode_encoding(barcode, encoding):
    """ Checks if the given barcode is correctly encoded.

    :return: True if the barcode string is encoded with the provided encoding.
    :rtype: bool
    """
    encoding = encoding.lower()
    if encoding == "any":
        return True
    expected_size = {
        'ean8': 8,
        'ean13': 13,
        'gtin14': 14,
        'upca': 12,
        'sscc': 18,
    }[encoding]
    if encoding == 'ean13' and barcode[0] == '0':
        # An EAN-13 starting with a zero would be ambiguous with UPC-A.
        return False
    return len(barcode) == expected_size \
        and re.match(r"^\d+$", barcode) \
        and get_barcode_check_digit(barcode) == int(barcode[-1])
|
||||
232
odoo-bringout-oca-ocb-base/odoo/tools/cache.py
Normal file
232
odoo-bringout-oca-ocb-base/odoo/tools/cache.py
Normal file
|
|
@ -0,0 +1,232 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
# decorator makes wrappers that have the same API as their wrapped function
|
||||
from collections import Counter, defaultdict
|
||||
from decorator import decorator
|
||||
from inspect import signature
|
||||
import logging
|
||||
|
||||
unsafe_eval = eval
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ormcache_counter(object):
    """ Statistic counters for cache entries. """
    # Keep instances tiny: one counter exists per (db, model, method).
    __slots__ = ['hit', 'miss', 'err']

    def __init__(self):
        self.hit = 0    # lookups served from the cache
        self.miss = 0   # lookups that had to compute the value
        self.err = 0    # lookups that failed (e.g. unhashable key)

    @property
    def ratio(self):
        """Hit ratio in percent; 0.0 when nothing was recorded yet."""
        lookups = self.hit + self.miss
        return 100.0 * self.hit / (lookups or 1)
|
||||
|
||||
# statistic counters dictionary, maps (dbname, modelname, method) to counter
# (defaultdict so a fresh counter is created on first lookup of a key)
STAT = defaultdict(ormcache_counter)
|
||||
|
||||
|
||||
class ormcache(object):
    """ LRU cache decorator for model methods.

    The parameters are strings that represent expressions referring to the
    signature of the decorated method, and are used to compute a cache key::

        @ormcache('model_name', 'mode')
        def _compute_domain(self, model_name, mode="read"):
            ...

    For the sake of backward compatibility, the decorator supports the named
    parameter `skiparg`::

        @ormcache(skiparg=1)
        def _compute_domain(self, model_name, mode="read"):
            ...

    Methods implementing this decorator should never return a Recordset,
    because the underlying cursor will eventually be closed and raise a
    `psycopg2.InterfaceError`.
    """
    def __init__(self, *args, **kwargs):
        # ``args`` are source expressions (over the decorated method's
        # parameters) later compiled into the cache-key function.
        self.args = args
        self.skiparg = kwargs.get('skiparg')

    def __call__(self, method):
        self.method = method
        self.determine_key()
        # ``decorator`` preserves the wrapped method's signature/metadata.
        lookup = decorator(self.lookup, method)
        # Expose cache invalidation on the wrapper itself.
        lookup.clear_cache = self.clear
        return lookup

    def determine_key(self):
        """ Determine the function that computes a cache key from arguments. """
        if self.skiparg is None:
            # build a string that represents function code and evaluate it
            args = str(signature(self.method))[1:-1]
            if self.args:
                code = "lambda %s: (%s,)" % (args, ", ".join(self.args))
            else:
                code = "lambda %s: ()" % (args,)
            # NOTE: evaluates developer-written decorator arguments only,
            # never user input.
            self.key = unsafe_eval(code)
        else:
            # backward-compatible function that uses self.skiparg
            self.key = lambda *args, **kwargs: args[self.skiparg:]

    def lru(self, model):
        # Return (cache mapping, key prefix, statistics counter) for the
        # registry backing ``model``'s database.
        counter = STAT[(model.pool.db_name, model._name, self.method)]
        return model.pool._Registry__cache, (model._name, self.method), counter

    def lookup(self, method, *args, **kwargs):
        # args[0] is the recordset (``self`` of the decorated method).
        d, key0, counter = self.lru(args[0])
        key = key0 + self.key(*args, **kwargs)
        try:
            r = d[key]
            counter.hit += 1
            return r
        except KeyError:
            # cache miss: compute, store, and return the value
            counter.miss += 1
            value = d[key] = self.method(*args, **kwargs)
            return value
        except TypeError:
            # unhashable key (e.g. a list argument): skip caching entirely
            _logger.warning("cache lookup error on %r", key, exc_info=True)
            counter.err += 1
            return self.method(*args, **kwargs)

    def clear(self, model, *args):
        """ Clear the registry cache """
        model.pool._clear_cache()
|
||||
|
||||
|
||||
class ormcache_context(ormcache):
    """ This LRU cache decorator is a variant of :class:`ormcache`, with an
    extra parameter ``keys`` that defines a sequence of dictionary keys. Those
    keys are looked up in the ``context`` parameter and combined to the cache
    key made by :class:`ormcache`.
    """
    def __init__(self, *args, **kwargs):
        super(ormcache_context, self).__init__(*args, **kwargs)
        # context keys whose values take part in the cache key
        self.keys = kwargs['keys']

    def determine_key(self):
        """ Determine the function that computes a cache key from arguments. """
        assert self.skiparg is None, "ormcache_context() no longer supports skiparg"
        # build a string that represents function code and evaluate it
        sign = signature(self.method)
        args = str(sign)[1:-1]
        # read the keys from the `context` argument when the method has one,
        # otherwise from the model's own `self._context`
        cont_expr = "(context or {})" if 'context' in sign.parameters else "self._context"
        keys_expr = "tuple(%s.get(k) for k in %r)" % (cont_expr, self.keys)
        if self.args:
            code = "lambda %s: (%s, %s)" % (args, ", ".join(self.args), keys_expr)
        else:
            code = "lambda %s: (%s,)" % (args, keys_expr)
        self.key = unsafe_eval(code)
|
||||
|
||||
|
||||
class ormcache_multi(ormcache):
    """ This LRU cache decorator is a variant of :class:`ormcache`, with an
    extra parameter ``multi`` that gives the name of a parameter. Upon call, the
    corresponding argument is iterated on, and every value leads to a cache
    entry under its own key.
    """
    def __init__(self, *args, **kwargs):
        super(ormcache_multi, self).__init__(*args, **kwargs)
        # name of the iterable argument that is fanned out into per-value keys
        self.multi = kwargs['multi']

    def determine_key(self):
        """ Determine the function that computes a cache key from arguments. """
        assert self.skiparg is None, "ormcache_multi() no longer supports skiparg"
        assert isinstance(self.multi, str), "ormcache_multi() parameter multi must be an argument name"

        super(ormcache_multi, self).determine_key()

        # key_multi computes the extra element added to the key
        sign = signature(self.method)
        args = str(sign)[1:-1]
        code_multi = "lambda %s: %s" % (args, self.multi)
        self.key_multi = unsafe_eval(code_multi)

        # self.multi_pos is the position of self.multi in args
        self.multi_pos = list(sign.parameters).index(self.multi)

    def lookup(self, method, *args, **kwargs):
        """ Per-value lookup: returns a dict mapping each value of the `multi`
        argument to its (possibly freshly computed) result. """
        d, key0, counter = self.lru(args[0])
        base_key = key0 + self.key(*args, **kwargs)
        ids = self.key_multi(*args, **kwargs)
        result = {}
        missed = []

        # first take what is available in the cache
        for i in ids:
            key = base_key + (i,)
            try:
                result[i] = d[key]
                counter.hit += 1
            except Exception:
                counter.miss += 1
                missed.append(i)

        if missed:
            # call the method for the ids that were not in the cache; note that
            # thanks to decorator(), the multi argument will be bound and passed
            # positionally in args.
            args = list(args)
            args[self.multi_pos] = missed
            result.update(method(*args, **kwargs))

            # store those new results back in the cache
            for i in missed:
                key = base_key + (i,)
                d[key] = result[i]

        return result
|
||||
|
||||
|
||||
class dummy_cache(object):
    """ Cache decorator replacement to actually do no caching. """

    def __init__(self, *args, **kwargs):
        # decorator parameters are accepted for API compatibility and discarded
        pass

    def __call__(self, fn):
        # expose the same `clear_cache` attribute as ormcache, then hand the
        # function back unwrapped
        fn.clear_cache = self.clear
        return fn

    def clear(self, *args, **kwargs):
        # nothing is cached, so there is nothing to invalidate
        pass
|
||||
|
||||
|
||||
def log_ormcache_stats(sig=None, frame=None):
    """ Log statistics of ormcache usage by database, model, and method.

    The (unused) `sig`/`frame` parameters let the function be installed
    directly as a signal handler.
    """
    from odoo.modules.registry import Registry
    import threading

    current = threading.current_thread()
    previous_dbname = getattr(current, 'dbname', 'n/a')

    for dbname, registry in sorted(Registry.registries.d.items()):
        # the log prefix includes the current thread's dbname
        current.dbname = dbname
        entries = Counter(key[:2] for key in registry._Registry__cache.d)
        # report entries ordered by (model name, method name)
        for model, method in sorted(entries, key=lambda k: (k[0], k[1].__name__)):
            stat = STAT[(dbname, model, method)]
            _logger.info(
                "%6d entries, %6d hit, %6d miss, %6d err, %4.1f%% ratio, for %s.%s",
                entries[(model, method)], stat.hit, stat.miss, stat.err,
                stat.ratio, model, method.__name__,
            )

    # restore the thread's original dbname for subsequent log records
    current.dbname = previous_dbname
|
||||
|
||||
|
||||
def get_cache_key_counter(bound_method, *args, **kwargs):
    """ Return the cache, key and stat counter for the given call. """
    model = bound_method.__self__
    # the ormcache decorator instance is reachable through the bound
    # clear_cache attribute it installed on the wrapper
    cache_decorator = bound_method.clear_cache.__self__
    cache, key_prefix, counter = cache_decorator.lru(model)
    full_key = key_prefix + cache_decorator.key(model, *args, **kwargs)
    return cache, full_key, counter
|
||||
|
||||
# For backward compatibility: legacy code imports `tools.cache`, which is
# simply an alias of the ormcache decorator
cache = ormcache
|
||||
338
odoo-bringout-oca-ocb-base/odoo/tools/cloc.py
Normal file
338
odoo-bringout-oca-ocb-base/odoo/tools/cloc.py
Normal file
|
|
@ -0,0 +1,338 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
import ast
|
||||
import pathlib
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
|
||||
import odoo
|
||||
from odoo.tools.config import config
|
||||
|
||||
# version number of the cloc counting logic/report
VERSION = 1
# glob patterns that are always excluded from the count, in addition to the
# per-module `cloc_exclude`/`demo`/`demo_xml` manifest entries
DEFAULT_EXCLUDE = [
    "__manifest__.py",
    "__openerp__.py",
    "tests/**/*",
    "static/lib/**/*",
    "static/tests/**/*",
    "migrations/**/*",
    "upgrades/**/*",
]

# modules whose addons directories are considered standard and skipped entirely
STANDARD_MODULES = ['web', 'web_enterprise', 'theme_common', 'base']
MAX_FILE_SIZE = 25 * 2**20  # 25 MB
# files containing a longer line are assumed minified and are not parsed
MAX_LINE_SIZE = 100000
# only files with these extensions are counted
VALID_EXTENSION = ['.py', '.js', '.xml', '.css', '.scss']
|
||||
|
||||
class Cloc(object):
    """ Count lines of code in Odoo modules and database-stored customizations.

    For every module the counters track the total number of lines and the
    number of effective code lines (blank lines and comments excluded), plus
    parsing errors and files excluded from the count.
    """
    def __init__(self):
        # per-module mapping {item: (code_lines, total_lines)}
        self.modules = {}
        # per-module effective code line count
        self.code = {}
        # per-module total line count
        self.total = {}
        # per-module mapping {item: error reason}
        self.errors = {}
        # per-module mapping {item: (code_lines, total_lines)} for excluded items
        self.excluded = {}
        # widest module/item name seen so far; used to size report columns
        self.max_width = 70

    #------------------------------------------------------
    # Parse
    #------------------------------------------------------
    def parse_xml(self, s):
        """ Return ``(code_lines, total_lines)`` for XML source `s`. """
        s = s.strip() + "\n"
        # Unbalanced xml comments inside a CDATA are not supported, and xml
        # comments inside a CDATA will (wrongly) be considered as comment
        total = s.count("\n")
        s = re.sub("(<!--.*?-->)", "", s, flags=re.DOTALL)
        s = re.sub(r"\s*\n\s*", r"\n", s).lstrip()
        return s.count("\n"), total

    def parse_py(self, s):
        """ Return ``(code_lines, total_lines)`` for Python source `s`,
        or ``(-1, "Syntax Error")`` when it cannot be parsed. """
        try:
            s = s.strip() + "\n"
            total = s.count("\n")
            lines = set()
            for i in ast.walk(ast.parse(s)):
                # we only count 1 for a long string or a docstring
                if hasattr(i, 'lineno'):
                    lines.add(i.lineno)
            return len(lines), total
        except Exception:
            return (-1, "Syntax Error")

    def parse_c_like(self, s, regex):
        """ Count a C-like language source `s`; `regex` matches both comments
        and string literals so comments inside strings are left untouched. """
        # Based on https://stackoverflow.com/questions/241327
        s = s.strip() + "\n"
        total = s.count("\n")
        # To avoid to use too much memory we don't try to count file
        # with very large line, usually minified file
        if max(len(l) for l in s.split('\n')) > MAX_LINE_SIZE:
            return -1, "Max line size exceeded"

        def replacer(match):
            s = match.group(0)
            # comments start with '/', string literals are kept verbatim
            return " " if s.startswith('/') else s

        comments_re = re.compile(regex, re.DOTALL | re.MULTILINE)
        s = re.sub(comments_re, replacer, s)
        s = re.sub(r"\s*\n\s*", r"\n", s).lstrip()
        return s.count("\n"), total

    def parse_js(self, s):
        """ Return ``(code_lines, total_lines)`` for JavaScript source `s`. """
        return self.parse_c_like(s, r'//.*?$|(?<!\\)/\*.*?\*/|\'(\\.|[^\\\'])*\'|"(\\.|[^\\"])*"')

    def parse_scss(self, s):
        """ Return ``(code_lines, total_lines)`` for SCSS source `s`. """
        return self.parse_c_like(s, r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"')

    def parse_css(self, s):
        """ Return ``(code_lines, total_lines)`` for CSS source `s`. """
        return self.parse_c_like(s, r'/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"')

    def parse(self, s, ext):
        """ Dispatch to the parser matching file extension `ext`.

        Returns None for an extension outside VALID_EXTENSION; callers check
        the extension before calling.
        """
        if ext == '.py':
            return self.parse_py(s)
        elif ext == '.js':
            return self.parse_js(s)
        elif ext == '.xml':
            return self.parse_xml(s)
        elif ext == '.css':
            return self.parse_css(s)
        elif ext == '.scss':
            return self.parse_scss(s)

    #------------------------------------------------------
    # Enumeration
    #------------------------------------------------------
    def book(self, module, item='', count=(0, 0), exclude=False):
        """ Record ``count = (code_lines, total_lines)`` for `item` in `module`.

        A count of ``(-1, reason)`` is booked as an error; `exclude` routes the
        entry to the excluded section instead of the module totals.
        """
        if count[0] == -1:
            self.errors.setdefault(module, {})
            self.errors[module][item] = count[1]
        elif exclude and item:
            self.excluded.setdefault(module, {})
            self.excluded[module][item] = count
        else:
            self.modules.setdefault(module, {})
            if item:
                self.modules[module][item] = count
            self.code[module] = self.code.get(module, 0) + count[0]
            self.total[module] = self.total.get(module, 0) + count[1]
        self.max_width = max(self.max_width, len(module), len(item) + 4)

    def count_path(self, path, exclude=None):
        """ Count every valid file under directory `path` (one module).

        `exclude` is an optional pre-populated set of absolute paths to skip;
        it is extended with the module manifest's exclusion globs.
        """
        path = path.rstrip('/')
        exclude_list = []
        for i in odoo.modules.module.MANIFEST_NAMES:
            manifest_path = os.path.join(path, i)
            try:
                with open(manifest_path, 'rb') as manifest:
                    exclude_list.extend(DEFAULT_EXCLUDE)
                    d = ast.literal_eval(manifest.read().decode('latin1'))
                    for j in ['cloc_exclude', 'demo', 'demo_xml']:
                        exclude_list.extend(d.get(j, []))
                    break
            except Exception:
                # no manifest with this name, or unreadable: try the next one
                pass
        if not exclude:
            exclude = set()
        for i in filter(None, exclude_list):
            exclude.update(str(p) for p in pathlib.Path(path).glob(i))

        module_name = os.path.basename(path)
        # register the module even if it ends up with no countable file
        self.book(module_name)
        for root, dirs, files in os.walk(path):
            for file_name in files:
                file_path = os.path.join(root, file_name)

                if file_path in exclude:
                    continue

                ext = os.path.splitext(file_path)[1].lower()
                if ext not in VALID_EXTENSION:
                    continue

                if os.path.getsize(file_path) > MAX_FILE_SIZE:
                    self.book(module_name, file_path, (-1, "Max file size exceeded"))
                    continue

                with open(file_path, 'rb') as f:
                    # Decode using latin1 to avoid error that may raise by decoding with utf8
                    # The chars not correctly decoded in latin1 have no impact on how many lines will be counted
                    content = f.read().decode('latin1')
                    self.book(module_name, file_path, self.parse(content, ext))

    def count_modules(self, env):
        """ Count all installed, non-imported modules of `env`'s database,
        skipping modules located in the standard addons paths. """
        # Exclude standard addons paths
        exclude_heuristic = [odoo.modules.get_module_path(m, display_warning=False) for m in STANDARD_MODULES]
        exclude_path = set([os.path.dirname(os.path.realpath(m)) for m in exclude_heuristic if m])

        domain = [('state', '=', 'installed')]
        # if base_import_module is present
        if env['ir.module.module']._fields.get('imported'):
            domain.append(('imported', '=', False))
        module_list = env['ir.module.module'].search(domain).mapped('name')

        for module_name in module_list:
            # NOTE(review): realpath() is applied before checking truthiness;
            # get_module_path() returning a falsy value here would raise —
            # presumably installed modules always resolve. TODO confirm.
            module_path = os.path.realpath(odoo.modules.get_module_path(module_name))
            if module_path:
                if any(module_path.startswith(i) for i in exclude_path):
                    continue
                self.count_path(module_path)

    def count_customization(self, env):
        """ Count customizations stored in the database: server action code,
        manual computed fields, and (for imported modules) qweb views and
        uploaded js/xml/css/scss attachments. """
        imported_module_sa = ""
        if env['ir.module.module']._fields.get('imported'):
            imported_module_sa = "OR (m.imported = TRUE AND m.state = 'installed')"
        query = """
            SELECT s.id, min(m.name), array_agg(d.module)
            FROM ir_act_server AS s
            LEFT JOIN ir_model_data AS d
                ON (d.res_id = s.id AND d.model = 'ir.actions.server')
            LEFT JOIN ir_module_module AS m
                ON m.name = d.module
            WHERE s.state = 'code' AND (m.name IS null {})
            GROUP BY s.id
        """.format(imported_module_sa)
        env.cr.execute(query)
        data = {r[0]: (r[1], r[2]) for r in env.cr.fetchall()}
        for a in env['ir.actions.server'].browse(data.keys()):
            self.book(
                data[a.id][0] or "odoo/studio",
                "ir.actions.server/%s: %s" % (a.id, a.name),
                self.parse_py(a.code),
                '__cloc_exclude__' in data[a.id][1]
            )

        imported_module_field = ("'odoo/studio'", "")
        if env['ir.module.module']._fields.get('imported'):
            imported_module_field = ("min(m.name)", "AND m.imported = TRUE AND m.state = 'installed'")
        # We always want to count manual compute field unless they are generated by studio
        # the module should be odoo/studio unless it comes from an imported module install
        # because manual field get an external id from the original module of the model
        query = r"""
            SELECT f.id, f.name, {}, array_agg(d.module)
            FROM ir_model_fields AS f
            LEFT JOIN ir_model_data AS d ON (d.res_id = f.id AND d.model = 'ir.model.fields')
            LEFT JOIN ir_module_module AS m ON m.name = d.module {}
            WHERE f.compute IS NOT null AND f.state = 'manual'
            GROUP BY f.id, f.name
        """.format(*imported_module_field)
        env.cr.execute(query)
        # Do not count field generated by studio
        all_data = env.cr.fetchall()
        data = {r[0]: (r[2], r[3]) for r in all_data if not ("studio_customization" in r[3] and not r[1].startswith('x_studio'))}
        for f in env['ir.model.fields'].browse(data.keys()):
            self.book(
                data[f.id][0] or "odoo/studio",
                "ir.model.fields/%s: %s" % (f.id, f.name),
                self.parse_py(f.compute),
                '__cloc_exclude__' in data[f.id][1]
            )

        if not env['ir.module.module']._fields.get('imported'):
            return

        # Count qweb view only from imported module and not studio
        query = """
            SELECT view.id, min(mod.name), array_agg(data.module)
            FROM ir_ui_view view
            INNER JOIN ir_model_data data ON view.id = data.res_id AND data.model = 'ir.ui.view'
            LEFT JOIN ir_module_module mod ON mod.name = data.module AND mod.imported = True
            WHERE view.type = 'qweb' AND data.module != 'studio_customization'
            GROUP BY view.id
            HAVING count(mod.name) > 0
        """
        env.cr.execute(query)
        custom_views = {r[0]: (r[1], r[2]) for r in env.cr.fetchall()}
        for view in env['ir.ui.view'].browse(custom_views.keys()):
            module_name = custom_views[view.id][0]
            self.book(
                module_name,
                "/%s/views/%s.xml" % (module_name, view.name),
                self.parse_xml(view.arch_base),
                '__cloc_exclude__' in custom_views[view.id][1]
            )

        # Count js, xml, css/scss file from imported module
        query = r"""
            SELECT attach.id, min(mod.name), array_agg(data.module)
            FROM ir_attachment attach
            INNER JOIN ir_model_data data ON attach.id = data.res_id AND data.model = 'ir.attachment'
            LEFT JOIN ir_module_module mod ON mod.name = data.module AND mod.imported = True
            WHERE attach.name ~ '.*\.(js|xml|css|scss)$'
            GROUP BY attach.id
            HAVING count(mod.name) > 0
        """
        env.cr.execute(query)
        uploaded_file = {r[0]: (r[1], r[2]) for r in env.cr.fetchall()}
        for attach in env['ir.attachment'].browse(uploaded_file.keys()):
            module_name = uploaded_file[attach.id][0]
            ext = os.path.splitext(attach.url)[1].lower()
            if ext not in VALID_EXTENSION:
                continue

            if len(attach.datas) > MAX_FILE_SIZE:
                self.book(module_name, attach.url, (-1, "Max file size exceeded"))
                continue

            # Decode using latin1 to avoid error that may raise by decoding with utf8
            # The chars not correctly decoded in latin1 have no impact on how many lines will be counted
            content = attach.raw.decode('latin1')
            self.book(
                module_name,
                attach.url,
                self.parse(content, ext),
                '__cloc_exclude__' in uploaded_file[attach.id][1],
            )

    def count_env(self, env):
        """ Count both filesystem modules and database customizations. """
        self.count_modules(env)
        self.count_customization(env)

    def count_database(self, database):
        """ Open a registry and count everything reachable from it. """
        # NOTE(review): the `database` argument is ignored and the registry is
        # opened on config['db_name'] instead — confirm this is intentional.
        registry = odoo.registry(config['db_name'])
        with registry.cursor() as cr:
            uid = odoo.SUPERUSER_ID
            env = odoo.api.Environment(cr, uid, {})
            self.count_env(env)

    #------------------------------------------------------
    # Report
    #------------------------------------------------------
    # pylint: disable=W0141
    def report(self, verbose=False, width=None):
        """ Print the counting results to stdout.

        :param verbose: also list per-file details, excluded files, and errors
        :param width: column width for names; defaults to the widest name
                      seen, capped by the terminal width
        """
        # Prepare format
        if not width:
            width = min(self.max_width, shutil.get_terminal_size()[0] - 24)
        hr = "-" * (width + 24) + "\n"
        fmt = '{k:%d}{lines:>8}{other:>8}{code:>8}\n' % (width,)

        # Render
        s = fmt.format(k="Odoo cloc", lines="Line", other="Other", code="Code")
        s += hr
        for m in sorted(self.modules):
            s += fmt.format(k=m, lines=self.total[m], other=self.total[m]-self.code[m], code=self.code[m])
            if verbose:
                # largest files first
                for i in sorted(self.modules[m], key=lambda i: self.modules[m][i][0], reverse=True):
                    code, total = self.modules[m][i]
                    s += fmt.format(k=' ' + i, lines=total, other=total - code, code=code)
        s += hr
        total = sum(self.total.values())
        code = sum(self.code.values())
        s += fmt.format(k='', lines=total, other=total - code, code=code)
        print(s)

        if self.excluded and verbose:
            ex = fmt.format(k="Excluded", lines="Line", other="Other", code="Code")
            ex += hr
            for m in sorted(self.excluded):
                for i in sorted(self.excluded[m], key=lambda i: self.excluded[m][i][0], reverse=True):
                    code, total = self.excluded[m][i]
                    ex += fmt.format(k=' ' + i, lines=total, other=total - code, code=code)
            ex += hr
            print(ex)

        if self.errors:
            e = "\nErrors\n\n"
            for m in sorted(self.errors):
                e += "{}\n".format(m)
                for i in sorted(self.errors[m]):
                    e += fmt.format(k=' ' + i, lines=self.errors[m][i], other='', code='')
            print(e)
|
||||
795
odoo-bringout-oca-ocb-base/odoo/tools/config.py
Normal file
795
odoo-bringout-oca-ocb-base/odoo/tools/config.py
Normal file
|
|
@ -0,0 +1,795 @@
|
|||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
import configparser as ConfigParser
|
||||
import errno
|
||||
import logging
|
||||
import optparse
|
||||
import glob
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import warnings
|
||||
import odoo
|
||||
from os.path import expandvars, expanduser, abspath, realpath, normcase
|
||||
from .. import release, conf, loglevels
|
||||
from . import appdirs
|
||||
|
||||
from passlib.context import CryptContext
# Password hashing context: new hashes use pbkdf2_sha512 with 600k rounds;
# 'plaintext' is still accepted for verification but marked deprecated, so
# passlib flags such hashes for upgrade (presumably used for the master
# password — confirm against the config options below)
crypt_context = CryptContext(schemes=['pbkdf2_sha512', 'plaintext'],
                             deprecated=['plaintext'],
                             pbkdf2_sha512__rounds=600_000)
|
||||
|
||||
class MyOption(optparse.Option, object):
    """ optparse Option carrying an extra ``my_default`` attribute.

    Defaults must not go through optparse's own ``default`` mechanism:
    optparse would then always report a value, making it impossible to tell
    whether the user actually supplied the option on the command line or
    whether the value should come from the configuration file. Storing the
    default in a separate attribute keeps command-line parsing results
    distinguishable from defaults, which are later copied into the
    configuration file's default values.
    """

    def __init__(self, *opts, **attrs):
        # extract our custom default before optparse validates the attributes
        self.my_default = attrs.pop('my_default', None)
        super(MyOption, self).__init__(*opts, **attrs)
|
||||
|
||||
# default --log-handler spec: root logger (empty prefix) at INFO level
DEFAULT_LOG_HANDLER = ':INFO'
|
||||
def _get_default_datadir():
    """ Return the default data directory for this Odoo release.

    Uses the per-user application directory when a home directory exists,
    otherwise falls back to a system-wide location.
    """
    if os.path.isdir(os.path.expanduser('~')):
        resolver = appdirs.user_data_dir
    elif sys.platform in ['win32', 'darwin']:
        resolver = appdirs.site_data_dir
    else:
        def resolver(**kwargs):
            return "/var/lib/%s" % kwargs['appname'].lower()
    # No "version" kwarg as session and filestore paths are shared against series
    return resolver(appname=release.product_name, appauthor=release.author)
|
||||
|
||||
def _deduplicate_loggers(loggers):
|
||||
""" Avoid saving multiple logging levels for the same loggers to a save
|
||||
file, that just takes space and the list can potentially grow unbounded
|
||||
if for some odd reason people use :option`--save`` all the time.
|
||||
"""
|
||||
# dict(iterable) -> the last item of iterable for any given key wins,
|
||||
# which is what we want and expect. Output order should not matter as
|
||||
# there are no duplicates within the output sequence
|
||||
return (
|
||||
'{}:{}'.format(logger, level)
|
||||
for logger, level in dict(it.split(':') for it in loggers).items()
|
||||
)
|
||||
|
||||
class configmanager(object):
|
||||
def __init__(self, fname=None):
|
||||
"""Constructor.
|
||||
|
||||
:param fname: a shortcut allowing to instantiate :class:`configmanager`
|
||||
from Python code without resorting to environment
|
||||
variable
|
||||
"""
|
||||
# Options not exposed on the command line. Command line options will be added
|
||||
# from optparse's parser.
|
||||
self.options = {
|
||||
'admin_passwd': 'admin',
|
||||
'csv_internal_sep': ',',
|
||||
'publisher_warranty_url': 'http://services.openerp.com/publisher-warranty/',
|
||||
'reportgz': False,
|
||||
'root_path': None,
|
||||
'websocket_keep_alive_timeout': 3600,
|
||||
'websocket_rate_limit_burst': 10,
|
||||
'websocket_rate_limit_delay': 0.2,
|
||||
}
|
||||
|
||||
# Not exposed in the configuration file.
|
||||
self.blacklist_for_save = set([
|
||||
'publisher_warranty_url', 'load_language', 'root_path',
|
||||
'init', 'save', 'config', 'update', 'stop_after_init', 'dev_mode', 'shell_interface',
|
||||
'longpolling_port',
|
||||
])
|
||||
|
||||
# dictionary mapping option destination (keys in self.options) to MyOptions.
|
||||
self.casts = {}
|
||||
|
||||
self.misc = {}
|
||||
self.config_file = fname
|
||||
|
||||
self._LOGLEVELS = dict([
|
||||
(getattr(loglevels, 'LOG_%s' % x), getattr(logging, x))
|
||||
for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET')
|
||||
])
|
||||
|
||||
version = "%s %s" % (release.description, release.version)
|
||||
self.parser = parser = optparse.OptionParser(version=version, option_class=MyOption)
|
||||
|
||||
# Server startup config
|
||||
group = optparse.OptionGroup(parser, "Common options")
|
||||
group.add_option("-c", "--config", dest="config", help="specify alternate config file")
|
||||
group.add_option("-s", "--save", action="store_true", dest="save", default=False,
|
||||
help="save configuration to ~/.odoorc (or to ~/.openerp_serverrc if it exists)")
|
||||
group.add_option("-i", "--init", dest="init", help="install one or more modules (comma-separated list, use \"all\" for all modules), requires -d")
|
||||
group.add_option("-u", "--update", dest="update",
|
||||
help="update one or more modules (comma-separated list, use \"all\" for all modules). Requires -d.")
|
||||
group.add_option("--without-demo", dest="without_demo",
|
||||
help="disable loading demo data for modules to be installed (comma-separated, use \"all\" for all modules). Requires -d and -i. Default is %default",
|
||||
my_default=False)
|
||||
group.add_option("-P", "--import-partial", dest="import_partial", my_default='',
|
||||
help="Use this for big data importation, if it crashes you will be able to continue at the current state. Provide a filename to store intermediate importation states.")
|
||||
group.add_option("--pidfile", dest="pidfile", help="file where the server pid will be stored")
|
||||
group.add_option("--addons-path", dest="addons_path",
|
||||
help="specify additional addons paths (separated by commas).",
|
||||
action="callback", callback=self._check_addons_path, nargs=1, type="string")
|
||||
group.add_option("--upgrade-path", dest="upgrade_path",
|
||||
help="specify an additional upgrade path.",
|
||||
action="callback", callback=self._check_upgrade_path, nargs=1, type="string")
|
||||
group.add_option("--pre-upgrade-scripts", dest="pre_upgrade_scripts", my_default="",
|
||||
help="Run specific upgrade scripts before loading any module when -u is provided.",
|
||||
action="callback", callback=self._check_scripts, nargs=1, type="string")
|
||||
group.add_option("--load", dest="server_wide_modules", help="Comma-separated list of server-wide modules.", my_default='base,web')
|
||||
|
||||
group.add_option("-D", "--data-dir", dest="data_dir", my_default=_get_default_datadir(),
|
||||
help="Directory where to store Odoo data")
|
||||
parser.add_option_group(group)
|
||||
|
||||
# HTTP
|
||||
group = optparse.OptionGroup(parser, "HTTP Service Configuration")
|
||||
group.add_option("--http-interface", dest="http_interface", my_default='',
|
||||
help="Listen interface address for HTTP services. "
|
||||
"Keep empty to listen on all interfaces (0.0.0.0)")
|
||||
group.add_option("-p", "--http-port", dest="http_port", my_default=8069,
|
||||
help="Listen port for the main HTTP service", type="int", metavar="PORT")
|
||||
group.add_option("--longpolling-port", dest="longpolling_port", my_default=0,
|
||||
help="Deprecated alias to the gevent-port option", type="int", metavar="PORT")
|
||||
group.add_option("--gevent-port", dest="gevent_port", my_default=8072,
|
||||
help="Listen port for the gevent worker", type="int", metavar="PORT")
|
||||
group.add_option("--no-http", dest="http_enable", action="store_false", my_default=True,
|
||||
help="Disable the HTTP and Longpolling services entirely")
|
||||
group.add_option("--proxy-mode", dest="proxy_mode", action="store_true", my_default=False,
|
||||
help="Activate reverse proxy WSGI wrappers (headers rewriting) "
|
||||
"Only enable this when running behind a trusted web proxy!")
|
||||
group.add_option("--x-sendfile", dest="x_sendfile", action="store_true", my_default=False,
|
||||
help="Activate X-Sendfile (apache) and X-Accel-Redirect (nginx) "
|
||||
"HTTP response header to delegate the delivery of large "
|
||||
"files (assets/attachments) to the web server.")
|
||||
# HTTP: hidden backwards-compatibility for "*xmlrpc*" options
|
||||
hidden = optparse.SUPPRESS_HELP
|
||||
group.add_option("--xmlrpc-interface", dest="http_interface", help=hidden)
|
||||
group.add_option("--xmlrpc-port", dest="http_port", type="int", help=hidden)
|
||||
group.add_option("--no-xmlrpc", dest="http_enable", action="store_false", help=hidden)
|
||||
|
||||
parser.add_option_group(group)
|
||||
|
||||
# WEB
|
||||
group = optparse.OptionGroup(parser, "Web interface Configuration")
|
||||
group.add_option("--db-filter", dest="dbfilter", my_default='', metavar="REGEXP",
|
||||
help="Regular expressions for filtering available databases for Web UI. "
|
||||
"The expression can use %d (domain) and %h (host) placeholders.")
|
||||
parser.add_option_group(group)
|
||||
|
||||
# Testing Group
|
||||
group = optparse.OptionGroup(parser, "Testing Configuration")
|
||||
group.add_option("--test-file", dest="test_file", my_default=False,
|
||||
help="Launch a python test file.")
|
||||
group.add_option("--test-enable", action="callback", callback=self._test_enable_callback,
|
||||
dest='test_enable',
|
||||
help="Enable unit tests.")
|
||||
group.add_option("--test-tags", dest="test_tags",
|
||||
help="Comma-separated list of specs to filter which tests to execute. Enable unit tests if set. "
|
||||
"A filter spec has the format: [-][tag][/module][:class][.method][[params]] "
|
||||
"The '-' specifies if we want to include or exclude tests matching this spec. "
|
||||
"The tag will match tags added on a class with a @tagged decorator "
|
||||
"(all Test classes have 'standard' and 'at_install' tags "
|
||||
"until explicitly removed, see the decorator documentation). "
|
||||
"'*' will match all tags. "
|
||||
"If tag is omitted on include mode, its value is 'standard'. "
|
||||
"If tag is omitted on exclude mode, its value is '*'. "
|
||||
"The module, class, and method will respectively match the module name, test class name and test method name. "
|
||||
"Example: --test-tags :TestClass.test_func,/test_module,external "
|
||||
"It is also possible to provide parameters to a test method that supports them"
|
||||
"Example: --test-tags /web.test_js[mail]"
|
||||
"If negated, a test-tag with parameter will negate the parameter when passing it to the test"
|
||||
|
||||
"Filtering and executing the tests happens twice: right "
|
||||
"after each module installation/update and at the end "
|
||||
"of the modules loading. At each stage tests are filtered "
|
||||
"by --test-tags specs and additionally by dynamic specs "
|
||||
"'at_install' and 'post_install' correspondingly.")
|
||||
|
||||
group.add_option("--screencasts", dest="screencasts", action="store", my_default=None,
|
||||
metavar='DIR',
|
||||
help="Screencasts will go in DIR/{db_name}/screencasts.")
|
||||
temp_tests_dir = os.path.join(tempfile.gettempdir(), 'odoo_tests')
|
||||
group.add_option("--screenshots", dest="screenshots", action="store", my_default=temp_tests_dir,
|
||||
metavar='DIR',
|
||||
help="Screenshots will go in DIR/{db_name}/screenshots. Defaults to %s." % temp_tests_dir)
|
||||
parser.add_option_group(group)
|
||||
|
||||
# Logging Group
|
||||
group = optparse.OptionGroup(parser, "Logging Configuration")
|
||||
group.add_option("--logfile", dest="logfile", help="file where the server log will be stored")
|
||||
group.add_option("--syslog", action="store_true", dest="syslog", my_default=False, help="Send the log to the syslog server")
|
||||
group.add_option('--log-handler', action="append", default=[], my_default=DEFAULT_LOG_HANDLER, metavar="PREFIX:LEVEL", help='setup a handler at LEVEL for a given PREFIX. An empty PREFIX indicates the root logger. This option can be repeated. Example: "odoo.orm:DEBUG" or "werkzeug:CRITICAL" (default: ":INFO")')
|
||||
group.add_option('--log-web', action="append_const", dest="log_handler", const="odoo.http:DEBUG", help='shortcut for --log-handler=odoo.http:DEBUG')
|
||||
group.add_option('--log-sql', action="append_const", dest="log_handler", const="odoo.sql_db:DEBUG", help='shortcut for --log-handler=odoo.sql_db:DEBUG')
|
||||
group.add_option('--log-db', dest='log_db', help="Logging database", my_default=False)
|
||||
group.add_option('--log-db-level', dest='log_db_level', my_default='warning', help="Logging database level")
|
||||
# For backward-compatibility, map the old log levels to something
|
||||
# quite close.
|
||||
levels = [
|
||||
'info', 'debug_rpc', 'warn', 'test', 'critical', 'runbot',
|
||||
'debug_sql', 'error', 'debug', 'debug_rpc_answer', 'notset'
|
||||
]
|
||||
group.add_option('--log-level', dest='log_level', type='choice',
|
||||
choices=levels, my_default='info',
|
||||
help='specify the level of the logging. Accepted values: %s.' % (levels,))
|
||||
|
||||
parser.add_option_group(group)
|
||||
|
||||
# SMTP Group
|
||||
group = optparse.OptionGroup(parser, "SMTP Configuration")
|
||||
group.add_option('--email-from', dest='email_from', my_default=False,
|
||||
help='specify the SMTP email address for sending email')
|
||||
group.add_option('--from-filter', dest='from_filter', my_default=False,
|
||||
help='specify for which email address the SMTP configuration can be used')
|
||||
group.add_option('--smtp', dest='smtp_server', my_default='localhost',
|
||||
help='specify the SMTP server for sending email')
|
||||
group.add_option('--smtp-port', dest='smtp_port', my_default=25,
|
||||
help='specify the SMTP port', type="int")
|
||||
group.add_option('--smtp-ssl', dest='smtp_ssl', action='store_true', my_default=False,
|
||||
help='if passed, SMTP connections will be encrypted with SSL (STARTTLS)')
|
||||
group.add_option('--smtp-user', dest='smtp_user', my_default=False,
|
||||
help='specify the SMTP username for sending email')
|
||||
group.add_option('--smtp-password', dest='smtp_password', my_default=False,
|
||||
help='specify the SMTP password for sending email')
|
||||
group.add_option('--smtp-ssl-certificate-filename', dest='smtp_ssl_certificate_filename', my_default=False,
|
||||
help='specify the SSL certificate used for authentication')
|
||||
group.add_option('--smtp-ssl-private-key-filename', dest='smtp_ssl_private_key_filename', my_default=False,
|
||||
help='specify the SSL private key used for authentication')
|
||||
parser.add_option_group(group)
|
||||
|
||||
group = optparse.OptionGroup(parser, "Database related options")
|
||||
group.add_option("-d", "--database", dest="db_name", my_default=False,
|
||||
help="specify the database name")
|
||||
group.add_option("-r", "--db_user", dest="db_user", my_default=False,
|
||||
help="specify the database user name")
|
||||
group.add_option("-w", "--db_password", dest="db_password", my_default=False,
|
||||
help="specify the database password")
|
||||
group.add_option("--pg_path", dest="pg_path", help="specify the pg executable path")
|
||||
group.add_option("--db_host", dest="db_host", my_default=False,
|
||||
help="specify the database host")
|
||||
group.add_option("--db_port", dest="db_port", my_default=False,
|
||||
help="specify the database port", type="int")
|
||||
group.add_option("--db_sslmode", dest="db_sslmode", type="choice", my_default='prefer',
|
||||
choices=['disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full'],
|
||||
help="specify the database ssl connection mode (see PostgreSQL documentation)")
|
||||
group.add_option("--db_maxconn", dest="db_maxconn", type='int', my_default=64,
|
||||
help="specify the maximum number of physical connections to PostgreSQL")
|
||||
group.add_option("--db-template", dest="db_template", my_default="template0",
|
||||
help="specify a custom database template to create a new database")
|
||||
parser.add_option_group(group)
|
||||
|
||||
group = optparse.OptionGroup(parser, "Internationalisation options",
|
||||
"Use these options to translate Odoo to another language. "
|
||||
"See i18n section of the user manual. Option '-d' is mandatory. "
|
||||
"Option '-l' is mandatory in case of importation"
|
||||
)
|
||||
group.add_option('--load-language', dest="load_language",
|
||||
help="specifies the languages for the translations you want to be loaded")
|
||||
group.add_option('-l', "--language", dest="language",
|
||||
help="specify the language of the translation file. Use it with --i18n-export or --i18n-import")
|
||||
group.add_option("--i18n-export", dest="translate_out",
|
||||
help="export all sentences to be translated to a CSV file, a PO file or a TGZ archive and exit")
|
||||
group.add_option("--i18n-import", dest="translate_in",
|
||||
help="import a CSV or a PO file with translations and exit. The '-l' option is required.")
|
||||
group.add_option("--i18n-overwrite", dest="overwrite_existing_translations", action="store_true", my_default=False,
|
||||
help="overwrites existing translation terms on updating a module or importing a CSV or a PO file.")
|
||||
group.add_option("--modules", dest="translate_modules",
|
||||
help="specify modules to export. Use in combination with --i18n-export")
|
||||
parser.add_option_group(group)
|
||||
|
||||
security = optparse.OptionGroup(parser, 'Security-related options')
|
||||
security.add_option('--no-database-list', action="store_false", dest='list_db', my_default=True,
|
||||
help="Disable the ability to obtain or view the list of databases. "
|
||||
"Also disable access to the database manager and selector, "
|
||||
"so be sure to set a proper --database parameter first")
|
||||
parser.add_option_group(security)
|
||||
|
||||
# Advanced options
|
||||
group = optparse.OptionGroup(parser, "Advanced options")
|
||||
group.add_option('--dev', dest='dev_mode', type="string",
|
||||
help="Enable developer mode. Param: List of options separated by comma. "
|
||||
"Options : all, reload, qweb, xml")
|
||||
group.add_option('--shell-interface', dest='shell_interface', type="string",
|
||||
help="Specify a preferred REPL to use in shell mode. Supported REPLs are: "
|
||||
"[ipython|ptpython|bpython|python]")
|
||||
group.add_option("--stop-after-init", action="store_true", dest="stop_after_init", my_default=False,
|
||||
help="stop the server after its initialization")
|
||||
group.add_option("--osv-memory-count-limit", dest="osv_memory_count_limit", my_default=0,
|
||||
help="Force a limit on the maximum number of records kept in the virtual "
|
||||
"osv_memory tables. By default there is no limit.",
|
||||
type="int")
|
||||
group.add_option("--transient-age-limit", dest="transient_age_limit", my_default=1.0,
|
||||
help="Time limit (decimal value in hours) records created with a "
|
||||
"TransientModel (mostly wizard) are kept in the database. Default to 1 hour.",
|
||||
type="float")
|
||||
group.add_option("--osv-memory-age-limit", dest="osv_memory_age_limit", my_default=False,
|
||||
help="Deprecated alias to the transient-age-limit option",
|
||||
type="float")
|
||||
group.add_option("--max-cron-threads", dest="max_cron_threads", my_default=2,
|
||||
help="Maximum number of threads processing concurrently cron jobs (default 2).",
|
||||
type="int")
|
||||
group.add_option("--limit-time-worker-cron", dest="limit_time_worker_cron", my_default=0,
|
||||
help="Maximum time a cron thread/worker stays alive before it is restarted. "
|
||||
"Set to 0 to disable. (default: 0)",
|
||||
type="int")
|
||||
group.add_option("--unaccent", dest="unaccent", my_default=False, action="store_true",
|
||||
help="Try to enable the unaccent extension when creating new databases.")
|
||||
group.add_option("--geoip-db", dest="geoip_database", my_default='/usr/share/GeoIP/GeoLite2-City.mmdb',
|
||||
help="Absolute path to the GeoIP database file.")
|
||||
parser.add_option_group(group)
|
||||
|
||||
if os.name == 'posix':
|
||||
group = optparse.OptionGroup(parser, "Multiprocessing options")
|
||||
# TODO sensible default for the three following limits.
|
||||
group.add_option("--workers", dest="workers", my_default=0,
|
||||
help="Specify the number of workers, 0 disable prefork mode.",
|
||||
type="int")
|
||||
group.add_option("--limit-memory-soft", dest="limit_memory_soft", my_default=2048 * 1024 * 1024,
|
||||
help="Maximum allowed virtual memory per worker (in bytes), when reached the worker be "
|
||||
"reset after the current request (default 2048MiB).",
|
||||
type="int")
|
||||
group.add_option("--limit-memory-hard", dest="limit_memory_hard", my_default=2560 * 1024 * 1024,
|
||||
help="Maximum allowed virtual memory per worker (in bytes), when reached, any memory "
|
||||
"allocation will fail (default 2560MiB).",
|
||||
type="int")
|
||||
group.add_option("--limit-time-cpu", dest="limit_time_cpu", my_default=60,
|
||||
help="Maximum allowed CPU time per request (default 60).",
|
||||
type="int")
|
||||
group.add_option("--limit-time-real", dest="limit_time_real", my_default=120,
|
||||
help="Maximum allowed Real time per request (default 120).",
|
||||
type="int")
|
||||
group.add_option("--limit-time-real-cron", dest="limit_time_real_cron", my_default=-1,
|
||||
help="Maximum allowed Real time per cron job. (default: --limit-time-real). "
|
||||
"Set to 0 for no limit. ",
|
||||
type="int")
|
||||
group.add_option("--limit-request", dest="limit_request", my_default=2**16,
|
||||
help="Maximum number of request to be processed per worker (default 65536).",
|
||||
type="int")
|
||||
parser.add_option_group(group)
|
||||
|
||||
# Copy all optparse options (i.e. MyOption) into self.options.
|
||||
for group in parser.option_groups:
|
||||
for option in group.option_list:
|
||||
if option.dest not in self.options:
|
||||
self.options[option.dest] = option.my_default
|
||||
self.casts[option.dest] = option
|
||||
|
||||
# generate default config
|
||||
self._parse_config()
|
||||
|
||||
def parse_config(self, args=None):
    """ Parse the configuration file (if any) and the command-line
    arguments.

    This method initializes odoo.tools.config and openerp.conf (the
    former should be removed in the future) with library-wide
    configuration values.

    This method must be called before proper usage of this library can be
    made.

    Typical usage of this method:

        odoo.tools.config.parse_config(sys.argv[1:])
    """
    opt = self._parse_config(args)
    # logging must be configured before deprecation warnings are emitted below
    odoo.netsvc.init_logger()
    self._warn_deprecated_options()
    # make the freshly-computed addons paths importable
    odoo.modules.module.initialize_sys_path()
    return opt
|
||||
|
||||
def _parse_config(self, args=None):
    """Parse ``args`` (a ``sys.argv[1:]``-style list), merge them with the
    configuration file and the option defaults, and fill ``self.options``.

    :param args: command-line argument list; ``None`` means no arguments.
    :return: the optparse ``Values`` object for the parsed command line.
    :raises SystemExit: via ``parser.error`` on invalid option combinations.
    """
    if args is None:
        args = []
    opt, args = self.parser.parse_args(args)

    def die(cond, msg):
        # abort with a usage error when ``cond`` holds
        if cond:
            self.parser.error(msg)

    # Ensures no illegitimate argument is silently discarded (avoids insidious "hyphen to dash" problem)
    die(args, "unrecognized parameters: '%s'" % " ".join(args))

    die(bool(opt.syslog) and bool(opt.logfile),
        "the syslog and logfile options are exclusive")

    die(opt.translate_in and (not opt.language or not opt.db_name),
        "the i18n-import option cannot be used without the language (-l) and the database (-d) options")

    die(opt.overwrite_existing_translations and not (opt.translate_in or opt.update),
        "the i18n-overwrite option cannot be used without the i18n-import option or without the update option")

    die(opt.translate_out and (not opt.db_name),
        "the i18n-export option cannot be used without the database (-d) option")

    # Check if the config file exists (-c used, but not -s)
    die(not opt.save and opt.config and not os.access(opt.config, os.R_OK),
        "The config file '%s' selected with -c/--config doesn't exist or is not readable, "
        "use -s/--save if you want to generate it" % opt.config)

    # BUGFIX: the option dest is ``transient_age_limit`` (there is no
    # ``opt.transient_memory_age_limit`` attribute, so the original check
    # raised AttributeError whenever --osv-memory-age-limit was given),
    # and the message referred to the wrong option name.
    die(bool(opt.osv_memory_age_limit) and bool(opt.transient_age_limit),
        "the osv-memory-age-limit option cannot be used with the "
        "transient-age-limit option, please only use the latter.")

    # place/search the config file on Win32 near the server installation
    # (../etc from the server)
    # if the server is run by an unprivileged user, he has to specify location of a config file where he has the rights to write,
    # else he won't be able to save the configurations, or even to start the server...
    # TODO use appdirs
    if os.name == 'nt':
        rcfilepath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'odoo.conf')
    else:
        rcfilepath = os.path.expanduser('~/.odoorc')
        old_rcfilepath = os.path.expanduser('~/.openerp_serverrc')

        die(os.path.isfile(rcfilepath) and os.path.isfile(old_rcfilepath),
            "Found '.odoorc' and '.openerp_serverrc' in your path. Please keep only one of "
            "them, preferably '.odoorc'.")

        if not os.path.isfile(rcfilepath) and os.path.isfile(old_rcfilepath):
            rcfilepath = old_rcfilepath

    self.rcfile = os.path.abspath(
        self.config_file or opt.config or os.environ.get('ODOO_RC') or os.environ.get('OPENERP_SERVER') or rcfilepath)
    self.load()

    # Verify that we want to log or not, if not the output will go to stdout
    if self.options['logfile'] in ('None', 'False'):
        self.options['logfile'] = False
    # the same for the pidfile
    if self.options['pidfile'] in ('None', 'False'):
        self.options['pidfile'] = False
    # the same for the test_tags
    if self.options['test_tags'] == 'None':
        self.options['test_tags'] = None
    # and the server_wide_modules
    if self.options['server_wide_modules'] in ('', 'None', 'False'):
        self.options['server_wide_modules'] = 'base,web'

    # if defined do not take the configfile value even if the defined value is None
    keys = ['gevent_port', 'http_interface', 'http_port', 'longpolling_port', 'http_enable', 'x_sendfile',
            'db_name', 'db_user', 'db_password', 'db_host', 'db_sslmode',
            'db_port', 'db_template', 'logfile', 'pidfile', 'smtp_port',
            'email_from', 'smtp_server', 'smtp_user', 'smtp_password', 'from_filter',
            'smtp_ssl_certificate_filename', 'smtp_ssl_private_key_filename',
            'db_maxconn', 'import_partial', 'addons_path', 'upgrade_path', 'pre_upgrade_scripts',
            'syslog', 'without_demo', 'screencasts', 'screenshots',
            'dbfilter', 'log_level', 'log_db',
            'log_db_level', 'geoip_database', 'dev_mode', 'shell_interface',
            'limit_time_worker_cron',
            ]

    for arg in keys:
        # Copy the command-line argument (except the special case for log_handler, due to
        # action=append requiring a real default, so we cannot use the my_default workaround)
        if getattr(opt, arg, None) is not None:
            self.options[arg] = getattr(opt, arg)
        # ... or keep, but cast, the config file value.
        elif isinstance(self.options[arg], str) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
            self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])

    if isinstance(self.options['log_handler'], str):
        self.options['log_handler'] = self.options['log_handler'].split(',')
    self.options['log_handler'].extend(opt.log_handler)

    # if defined but None take the configfile value
    keys = [
        'language', 'translate_out', 'translate_in', 'overwrite_existing_translations',
        'dev_mode', 'shell_interface', 'smtp_ssl', 'load_language',
        'stop_after_init', 'without_demo', 'http_enable', 'syslog',
        'list_db', 'proxy_mode',
        'test_file', 'test_tags',
        'osv_memory_count_limit', 'osv_memory_age_limit', 'transient_age_limit', 'max_cron_threads', 'unaccent',
        'data_dir',
        'server_wide_modules',
    ]

    posix_keys = [
        'workers',
        'limit_memory_hard', 'limit_memory_soft',
        'limit_time_cpu', 'limit_time_real', 'limit_request', 'limit_time_real_cron'
    ]

    if os.name == 'posix':
        keys += posix_keys
    else:
        # the multiprocessing options are meaningless outside posix
        self.options.update(dict.fromkeys(posix_keys, None))

    # Copy the command-line arguments...
    for arg in keys:
        if getattr(opt, arg) is not None:
            self.options[arg] = getattr(opt, arg)
        # ... or keep, but cast, the config file value.
        elif isinstance(self.options[arg], str) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
            self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])

    self.options['root_path'] = self._normalize(os.path.join(os.path.dirname(__file__), '..'))
    if not self.options['addons_path'] or self.options['addons_path'] == 'None':
        # no explicit addons path: probe the conventional locations
        default_addons = []
        base_addons = os.path.join(self.options['root_path'], 'addons')
        if os.path.exists(base_addons):
            default_addons.append(base_addons)
        main_addons = os.path.abspath(os.path.join(self.options['root_path'], '../addons'))
        if os.path.exists(main_addons):
            default_addons.append(main_addons)
        self.options['addons_path'] = ','.join(default_addons)
    else:
        self.options['addons_path'] = ",".join(
            self._normalize(x)
            for x in self.options['addons_path'].split(',')
            if x.strip())

    self.options["upgrade_path"] = (
        ",".join(self._normalize(x)
                 for x in self.options['upgrade_path'].split(',')
                 if x.strip())
        if self.options['upgrade_path']
        else ""
    )
    self.options["pre_upgrade_scripts"] = (
        ",".join(self._normalize(x)
                 for x in self.options['pre_upgrade_scripts'].split(',')
                 if x.strip())
        if self.options['pre_upgrade_scripts']
        else ""
    )

    self.options['init'] = opt.init and dict.fromkeys(opt.init.split(','), 1) or {}
    self.options['demo'] = (dict(self.options['init'])
                            if not self.options['without_demo'] else {})
    self.options['update'] = opt.update and dict.fromkeys(opt.update.split(','), 1) or {}
    self.options['translate_modules'] = opt.translate_modules and [m.strip() for m in opt.translate_modules.split(',')] or ['all']
    self.options['translate_modules'].sort()

    dev_split = [s.strip() for s in opt.dev_mode.split(',')] if opt.dev_mode else []
    # 'all' expands to the full set of developer features
    self.options['dev_mode'] = dev_split + (['reload', 'qweb', 'xml'] if 'all' in dev_split else [])

    if opt.pg_path:
        self.options['pg_path'] = opt.pg_path

    self.options['test_enable'] = bool(self.options['test_tags'])

    if opt.save:
        self.save()

    # normalize path options
    for key in ['data_dir', 'logfile', 'pidfile', 'test_file', 'screencasts', 'screenshots', 'pg_path', 'translate_out', 'translate_in', 'geoip_database']:
        self.options[key] = self._normalize(self.options[key])

    conf.addons_paths = self.options['addons_path'].split(',')

    conf.server_wide_modules = [
        m.strip() for m in self.options['server_wide_modules'].split(',') if m.strip()
    ]
    return opt
|
||||
|
||||
def _warn_deprecated_options(self):
    """Translate deprecated option aliases to their modern equivalents.

    Each truthy legacy key is removed from ``self.options``, its value is
    copied to the replacement key, and a DeprecationWarning is emitted.
    """
    renamings = (
        ('osv_memory_age_limit', 'transient_age_limit',
         "The osv-memory-age-limit is a deprecated alias to "
         "the transient-age-limit option, please use the latter."),
        ('longpolling_port', 'gevent_port',
         "The longpolling-port is a deprecated alias to "
         "the gevent-port option, please use the latter."),
    )
    for legacy, modern, message in renamings:
        if self.options[legacy]:
            warnings.warn(message, DeprecationWarning)
            self.options[modern] = self.options.pop(legacy)
|
||||
|
||||
def _is_addons_path(self, path):
    """Return True when *path* contains at least one Odoo module, i.e. a
    sub-directory with an ``__init__.py`` and one of the manifest files."""
    from odoo.modules.module import MANIFEST_NAMES
    for entry in os.listdir(path):
        candidate = os.path.join(path, entry)
        if not os.path.isdir(candidate):
            continue
        has_init = os.path.isfile(os.path.join(candidate, '__init__.py'))
        has_manifest = any(
            os.path.isfile(os.path.join(candidate, manifest))
            for manifest in MANIFEST_NAMES
        )
        if has_init and has_manifest:
            return True
    return False
|
||||
|
||||
def _check_addons_path(self, option, opt, value, parser):
    """optparse callback validating a comma-separated addons path list.

    Every entry must be an existing directory containing Odoo modules; the
    absolute paths are stored on the parser, re-joined with commas.
    """
    checked = []
    for raw in value.split(','):
        stripped = raw.strip()
        resolved = os.path.abspath(os.path.expanduser(stripped))
        if not os.path.isdir(resolved):
            raise optparse.OptionValueError("option %s: no such directory: %r" % (opt, resolved))
        if not self._is_addons_path(resolved):
            raise optparse.OptionValueError("option %s: the path %r is not a valid addons directory" % (opt, stripped))
        checked.append(resolved)

    setattr(parser.values, option.dest, ",".join(checked))
|
||||
|
||||
def _check_scripts(self, option, opt, value, parser):
    """optparse callback validating --pre-upgrade-scripts.

    Every comma-separated entry must be an existing file; duplicates are
    dropped (order preserved) and the normalized list is stored on the
    parser as a comma-joined string.
    """
    seen = []
    for raw in value.split(','):
        stripped = raw.strip()
        normalized = self._normalize(stripped)
        if not os.path.isfile(normalized):
            raise optparse.OptionValueError("option %s: no such file: %r" % (opt, stripped))
        if normalized not in seen:
            seen.append(normalized)
    setattr(parser.values, option.dest, ",".join(seen))
|
||||
|
||||
|
||||
def _check_upgrade_path(self, option, opt, value, parser):
    """optparse callback validating --upgrade-path.

    Every comma-separated entry must be an existing directory that holds
    migration scripts; duplicates are dropped (order preserved) and the
    normalized list is stored on the parser as a comma-joined string.
    """
    seen = []
    for raw in value.split(','):
        stripped = raw.strip()
        normalized = self._normalize(stripped)
        if not os.path.isdir(normalized):
            raise optparse.OptionValueError("option %s: no such directory: %r" % (opt, stripped))
        if not self._is_upgrades_path(normalized):
            raise optparse.OptionValueError("option %s: the path %r is not a valid upgrade directory" % (opt, stripped))
        if normalized not in seen:
            seen.append(normalized)
    setattr(parser.values, option.dest, ",".join(seen))
|
||||
|
||||
def _is_upgrades_path(self, res):
    """Return True when *res* looks like a migration-scripts tree, i.e.
    contains at least one ``<module>/<version>/{pre,post,end}-*.py`` file."""
    patterns = (
        os.path.join(res, f"*/*/{stage}-*.py")
        for stage in ("pre", "post", "end")
    )
    return any(glob.glob(pattern) for pattern in patterns)
|
||||
|
||||
def _test_enable_callback(self, option, opt, value, parser):
    """optparse callback for --test-enable: default the test tags to the
    standard suite unless the user already provided some."""
    if parser.values.test_tags:
        return
    parser.values.test_tags = "+standard"
|
||||
|
||||
def load(self):
    """Read ``self.rcfile`` and fill ``self.options`` from the [options]
    section (translating legacy xmlrpc_* names) and ``self.misc`` from
    every other section.

    The exact strings True/true and False/false become booleans; missing
    or unreadable files and a missing [options] section are ignored.
    """
    legacy_names = {
        'xmlrpc_port': 'http_port',
        'xmlrpc_interface': 'http_interface',
        'xmlrpc': 'http_enable',
    }

    def _coerce(raw):
        # only these exact spellings are converted, as before
        if raw in ('True', 'true'):
            return True
        if raw in ('False', 'false'):
            return False
        return raw

    parser = ConfigParser.RawConfigParser()
    try:
        parser.read([self.rcfile])
        for name, value in parser.items('options'):
            self.options[legacy_names.get(name, name)] = _coerce(value)
        # parse the other sections, as well
        for section in parser.sections():
            if section == 'options':
                continue
            self.misc.setdefault(section, {})
            for name, value in parser.items(section):
                self.misc[section][name] = _coerce(value)
    except IOError:
        pass
    except ConfigParser.NoSectionError:
        pass
|
||||
|
||||
def save(self, keys=None):
    """Write the current configuration to ``self.rcfile``.

    :param keys: optional iterable restricting which options are written;
                 when given and the file exists, its current content is
                 loaded first so unrelated options are preserved.

    Options listed in ``self.blacklist_for_save`` and purely transient
    ones (version, i18n and init/update/demo selections) are never
    persisted.  Errors are reported on stderr, never raised.
    """
    p = ConfigParser.RawConfigParser()
    # map numeric log levels back to their symbolic names for readability
    loglevelnames = dict(zip(self._LOGLEVELS.values(), self._LOGLEVELS))
    rc_exists = os.path.exists(self.rcfile)
    if rc_exists and keys:
        p.read([self.rcfile])
    if not p.has_section('options'):
        p.add_section('options')
    for opt in sorted(self.options):
        if keys is not None and opt not in keys:
            continue
        if opt in ('version', 'language', 'translate_out', 'translate_in', 'overwrite_existing_translations', 'init', 'update', 'demo'):
            continue
        if opt in self.blacklist_for_save:
            continue
        if opt in ('log_level',):
            p.set('options', opt, loglevelnames.get(self.options[opt], self.options[opt]))
        elif opt == 'log_handler':
            p.set('options', opt, ','.join(_deduplicate_loggers(self.options[opt])))
        else:
            p.set('options', opt, self.options[opt])

    for sec in sorted(self.misc):
        p.add_section(sec)
        for opt in sorted(self.misc[sec]):
            p.set(sec, opt, self.misc[sec][opt])

    # try to create the directories and write the file
    try:
        if not rc_exists and not os.path.exists(os.path.dirname(self.rcfile)):
            os.makedirs(os.path.dirname(self.rcfile))
        try:
            # BUGFIX: the file handle was previously leaked
            # (p.write(open(...))); use a context manager so it is closed.
            with open(self.rcfile, 'w') as rcfile_fd:
                p.write(rcfile_fd)
            if not rc_exists:
                # the config file may contain passwords: owner-only access
                os.chmod(self.rcfile, 0o600)
        except IOError:
            sys.stderr.write("ERROR: couldn't write the config file\n")

    except OSError:
        # what to do if impossible?
        sys.stderr.write("ERROR: couldn't create the config directory\n")
|
||||
|
||||
def get(self, key, default=None):
    """Dict-like read access on the option values; returns *default*
    when *key* is unknown."""
    return self.options.get(key, default)
|
||||
|
||||
def pop(self, key, default=None):
    """Remove option *key* and return its value, or *default* when the
    key is unknown."""
    return self.options.pop(key, default)
|
||||
|
||||
def get_misc(self, sect, key, default=None):
    """Read *key* from the extra config-file section *sect*; returns
    *default* when the section or the key is missing."""
    section = self.misc.get(sect, {})
    return section.get(key, default)
|
||||
|
||||
def __setitem__(self, key, value):
    """Store *value* under *key*; string values are pushed through the
    registered optparse type checker for that key, when there is one."""
    self.options[key] = value
    stored = self.options[key]
    if key not in self.casts or not isinstance(stored, str):
        return
    caster = self.casts[key]
    checker = optparse.Option.TYPE_CHECKER.get(caster.type)
    if checker is not None:
        self.options[key] = checker(caster, key, stored)
|
||||
|
||||
def __getitem__(self, key):
    """Subscript access to option values; raises KeyError for unknown keys."""
    return self.options[key]
|
||||
|
||||
@property
def addons_data_dir(self):
    """Per-release directory for addons data: ``<data_dir>/addons/<series>``.

    Created lazily on first access; creation failures are only logged at
    debug level and the (possibly missing) path is returned anyway.
    """
    add_dir = os.path.join(self['data_dir'], 'addons')
    d = os.path.join(add_dir, release.series)
    if not os.path.exists(d):
        try:
            # bootstrap parent dir +rwx
            if not os.path.exists(add_dir):
                os.makedirs(add_dir, 0o700)
            # try to make +rx placeholder dir, will need manual +w to activate it
            os.makedirs(d, 0o500)
        except OSError:
            logging.getLogger(__name__).debug('Failed to create addons data dir %s', d)
    return d
|
||||
|
||||
@property
def session_dir(self):
    """Directory holding HTTP session files: ``<data_dir>/sessions``.

    Created (mode 0700) on first access; asserts it is writable.
    """
    path = os.path.join(self['data_dir'], 'sessions')
    try:
        os.makedirs(path, 0o700)
    except OSError as exc:
        # an already-existing directory is fine, anything else is fatal
        if exc.errno != errno.EEXIST:
            raise
    assert os.access(path, os.W_OK), \
        "%s: directory is not writable" % path
    return path
|
||||
|
||||
def filestore(self, dbname):
    """Return the attachment filestore directory for database *dbname*."""
    root = self['data_dir']
    return os.path.join(root, 'filestore', dbname)
|
||||
|
||||
def set_admin_password(self, new_password):
    """Hash *new_password* and store it as the master (admin) password."""
    if hasattr(crypt_context, 'hash'):
        hasher = crypt_context.hash
    else:
        # older passlib releases spell it "encrypt"
        hasher = crypt_context.encrypt
    self.options['admin_passwd'] = hasher(new_password)
|
||||
|
||||
def verify_admin_password(self, password):
    """Verifies the super-admin password, possibly updating the stored hash if needed"""
    current = self.options['admin_passwd']
    # an empty password/hash means authentication is forbidden
    if not current:
        return False
    ok, new_hash = crypt_context.verify_and_update(password, current)
    if not ok:
        return
    # passlib asked for a hash upgrade: persist it transparently
    if new_hash:
        self.options['admin_passwd'] = new_hash
    return True
|
||||
|
||||
def _normalize(self, path):
    """Return '' for falsy input, otherwise the stripped path with env
    vars and ~ expanded, made absolute, symlink-resolved and
    case-normalised."""
    if not path:
        return ''
    expanded = expandvars(expanduser(path.strip()))
    return normcase(realpath(abspath(expanded)))
|
||||
|
||||
|
||||
# Shared singleton: the rest of the code base imports this instance
# (``from odoo.tools import config``) rather than instantiating the class.
config = configmanager()
|
||||
837
odoo-bringout-oca-ocb-base/odoo/tools/convert.py
Normal file
837
odoo-bringout-oca-ocb-base/odoo/tools/convert.py
Normal file
|
|
@ -0,0 +1,837 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
__all__ = [
|
||||
'convert_file', 'convert_sql_import',
|
||||
'convert_csv_import', 'convert_xml_import'
|
||||
]
|
||||
|
||||
import base64
|
||||
import io
|
||||
import logging
|
||||
import os.path
|
||||
import pprint
|
||||
import re
|
||||
import subprocess
|
||||
import warnings
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from dateutil.relativedelta import relativedelta
|
||||
|
||||
import pytz
|
||||
from lxml import etree, builder
|
||||
try:
|
||||
import jingtrang
|
||||
except ImportError:
|
||||
jingtrang = None
|
||||
|
||||
import odoo
|
||||
from . import pycompat
|
||||
from .config import config
|
||||
from .misc import file_open, unquote, ustr, SKIPPED_ELEMENT_TYPES
|
||||
from .translate import _
|
||||
from odoo import SUPERUSER_ID, api
|
||||
from odoo.exceptions import ValidationError
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
from .safe_eval import safe_eval as s_eval, pytz, time
|
||||
# Convenience wrapper around the sandboxed evaluator with ctx as the (shared,
# writable) namespace.  NOTE(review): nocopy=True lets s_eval mutate ctx in
# place, and the mutable default {} is shared across calls that omit ctx —
# presumably harmless for how it is used here, but verify.
safe_eval = lambda expr, ctx={}: s_eval(expr, ctx, nocopy=True)
|
||||
|
||||
class ParseError(Exception):
    """Raised when a data file (XML/CSV) cannot be converted/imported."""
    ...
|
||||
|
||||
class RecordDictWrapper(dict):
    """Dictionary facade over a record.

    Used to pass a record as locals in eval: records do not strictly
    behave like dict, so we force them to — lookups are served by the
    wrapped record first, then by the dict itself.
    """

    def __init__(self, record):
        self.record = record

    def __getitem__(self, key):
        record = self.record
        if key in record:
            return record[key]
        return dict.__getitem__(self, key)
|
||||
|
||||
def _get_idref(self, env, model_str, idref):
    """Build the evaluation context for ``eval=`` attributes.

    The caller's *idref* mapping is extended with date/time helpers,
    ``ref`` (external-id resolution) and, when a model is known, an
    ``obj`` record accessor.
    """
    context = dict(
        idref,
        Command=odoo.fields.Command,
        time=time,
        DateTime=datetime,
        datetime=datetime,
        timedelta=timedelta,
        relativedelta=relativedelta,
        version=odoo.release.major_version,
        ref=self.id_get,
        pytz=pytz,
    )
    if model_str:
        context['obj'] = env[model_str].browse
    return context
|
||||
|
||||
def _fix_multiple_roots(node):
    """
    Surround the children of the ``node`` element of an XML field with a
    single root "data" element, to prevent having a document with multiple
    roots once parsed separately.

    XML nodes should have one root only, but we'd like to support
    direct multiple roots in our partial documents (like inherited view architectures).
    As a convention we'll surround multiple root with a container "data" element, to be
    ignored later when parsing.
    """
    meaningful = [child for child in node if not isinstance(child, SKIPPED_ELEMENT_TYPES)]
    if len(meaningful) <= 1:
        return
    wrapper = etree.Element("data")
    for child in node:
        # appending an lxml element re-parents it under the wrapper
        wrapper.append(child)
    node.append(wrapper)
|
||||
|
||||
def _eval_xml(self, node, env):
    """Evaluate an XML data node and return its Python value.

    Handles ``<field>``/``<value>`` (with ``search``, ``eval``, ``file``
    and ``type`` variants), ``<function>`` (method invocation through the
    ORM) and ``<test>`` nodes.  ``self`` is the enclosing importer,
    providing ``idref``/``id_get``/``module``.
    """
    if node.tag in ('field','value'):
        t = node.get('type','char')
        f_model = node.get('model')
        if node.get('search'):
            # resolve the value through an ORM search expression
            f_search = node.get("search")
            f_use = node.get("use",'id')
            f_name = node.get("name")
            idref2 = {}
            if f_search:
                idref2 = _get_idref(self, env, f_model, self.idref)
            q = safe_eval(f_search, idref2)
            ids = env[f_model].search(q).ids
            if f_use != 'id':
                # project the matched records onto the requested field
                ids = [x[f_use] for x in env[f_model].browse(ids).read([f_use])]
            _fields = env[f_model]._fields
            if (f_name in _fields) and _fields[f_name].type == 'many2many':
                return ids
            # scalar target: keep only the first match
            f_val = False
            if len(ids):
                f_val = ids[0]
                if isinstance(f_val, tuple):
                    f_val = f_val[0]
            return f_val
        a_eval = node.get('eval')
        if a_eval:
            idref2 = _get_idref(self, env, f_model, self.idref)
            try:
                return safe_eval(a_eval, idref2)
            except Exception:
                logging.getLogger('odoo.tools.convert.init').error(
                    'Could not eval(%s) for %s in %s', a_eval, node.get('name'), env.context)
                raise
        def _process(s):
            # substitute %(xml_id)s/%(xml_id)d placeholders with database ids
            matches = re.finditer(br'[^%]%\((.*?)\)[ds]'.decode('utf-8'), s)
            done = set()
            for m in matches:
                found = m.group()[1:]
                if found in done:
                    continue
                done.add(found)
                id = m.groups()[0]
                if not id in self.idref:
                    self.idref[id] = self.id_get(id)
                # So funny story: in Python 3, bytes(n: int) returns a
                # bytestring of n nuls. In Python 2 it obviously returns the
                # stringified number, which is what we're expecting here
                s = s.replace(found, str(self.idref[id]))
            s = s.replace('%%', '%') # Quite weird but it's for (somewhat) backward compatibility sake
            return s

        if t == 'xml':
            _fix_multiple_roots(node)
            return '<?xml version="1.0"?>\n'\
                +_process("".join(etree.tostring(n, encoding='unicode') for n in node))
        if t == 'html':
            return _process("".join(etree.tostring(n, method='html', encoding='unicode') for n in node))

        data = node.text
        if node.get('file'):
            # file content overrides the node's text
            with file_open(node.get('file'), 'rb', env=env) as f:
                data = f.read()

        if t == 'base64':
            return base64.b64encode(data)

        # after that, only text content makes sense
        data = pycompat.to_text(data)
        if t == 'file':
            from ..modules import module
            path = data.strip()
            if not module.get_module_resource(self.module, path):
                raise IOError("No such file or directory: '%s' in %s" % (
                    path, self.module))
            return '%s,%s' % (self.module, path)

        if t == 'char':
            return data

        if t == 'int':
            d = data.strip()
            if d == 'None':
                return None
            return int(d)

        if t == 'float':
            return float(data.strip())

        if t in ('list','tuple'):
            # recursively evaluate nested <value> children
            res=[]
            for n in node.iterchildren(tag='value'):
                res.append(_eval_xml(self, n, env))
            if t=='tuple':
                return tuple(res)
            return res
    elif node.tag == "function":
        model_str = node.get('model')
        model = env[model_str]
        method_name = node.get('name')
        # determine arguments
        args = []
        kwargs = {}
        a_eval = node.get('eval')

        if a_eval:
            idref2 = _get_idref(self, env, model_str, self.idref)
            args = list(safe_eval(a_eval, idref2))
        for child in node:
            if child.tag == 'value' and child.get('name'):
                kwargs[child.get('name')] = _eval_xml(self, child, env)
            else:
                args.append(_eval_xml(self, child, env))
        # merge current context with context in kwargs
        kwargs['context'] = {**env.context, **kwargs.get('context', {})}
        # invoke method
        return odoo.api.call_kw(model, method_name, args, kwargs)
    elif node.tag == "test":
        return node.text
|
||||
|
||||
|
||||
def str2bool(value):
    """Interpret the string *value* as a boolean flag (case-insensitive).

    Every string is considered true except the literals '0', 'false'
    and 'off' (in any letter case).
    """
    falsy_literals = ('0', 'false', 'off')
    return value.lower() not in falsy_literals
||||
def nodeattr2bool(node, attr, default=False):
    """Read XML attribute *attr* from *node* and coerce it to a boolean.

    Returns *default* when the attribute is absent or blank after
    stripping whitespace; otherwise delegates to :func:`str2bool`.
    """
    raw = node.get(attr)
    if not raw:
        return default
    stripped = raw.strip()
    return str2bool(stripped) if stripped else default
||||
class xml_import(object):
|
||||
    def get_env(self, node, eval_context=None):
        """Return the environment to use while processing *node*.

        If the element carries a ``uid`` or ``context`` attribute, a new
        environment is derived from the current one; otherwise the current
        environment (top of ``self.envs``) is returned unchanged.

        :param node: lxml element, possibly with ``uid``/``context`` attributes
        :param eval_context: extra names made available (besides ``ref``)
            when evaluating the ``context`` attribute
        :return: an environment object
        """
        uid = node.get('uid')
        context = node.get('context')
        if uid or context:
            return self.env(
                # ``uid`` is an xmlid, resolved to a database id
                user=uid and self.id_get(uid),
                # the evaluated context is layered on top of the current one
                context=context and {
                    **self.env.context,
                    **safe_eval(context, {
                        'ref': self.id_get,
                        **(eval_context or {})
                    })
                }
            )
        return self.env
||||
def make_xml_id(self, xml_id):
|
||||
if not xml_id or '.' in xml_id:
|
||||
return xml_id
|
||||
return "%s.%s" % (self.module, xml_id)
|
||||
|
||||
    def _test_xml_id(self, xml_id):
        """Validate the shape of an external identifier.

        A dotted identifier must have exactly one dot, and when the module
        part differs from the current module, that module must be installed.

        :raises AssertionError: when the identifier is malformed or refers
            to a module that is not installed
        """
        if '.' in xml_id:
            module, id = xml_id.split('.', 1)
            assert '.' not in id, """The ID reference "%s" must contain
maximum one dot. They are used to refer to other modules ID, in the
form: module.record_id""" % (xml_id,)
            if module != self.module:
                # cross-module reference: the target module must be installed
                modcnt = self.env['ir.module.module'].search_count([('name', '=', module), ('state', '=', 'installed')])
                assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
|
||||
    def _tag_delete(self, rec):
        """Process a ``<delete>`` element: unlink the matching records.

        Records can be selected by a ``search`` domain, by an ``id``
        (xmlid), or both; failures to resolve either selector are logged
        and skipped rather than aborting the import.
        """
        d_model = rec.get("model")
        records = self.env[d_model]

        d_search = rec.get("search")
        if d_search:
            idref = _get_idref(self, self.env, d_model, {})
            try:
                records = records.search(safe_eval(d_search, idref))
            except ValueError:
                # evaluating/searching the domain failed: best-effort delete
                _logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)

        d_id = rec.get("id")
        if d_id:
            try:
                records += records.browse(self.id_get(d_id))
            except ValueError:
                # d_id cannot be found. doesn't matter in this case
                _logger.warning('Skipping deletion for missing XML ID `%r`', d_id, exc_info=True)

        if records:
            records.unlink()
||||
    def _tag_report(self, rec):
        """Process a deprecated ``<report>`` element.

        Builds an ``ir.actions.report`` values dict from the element's
        attributes, loads it through ``_load_records`` and creates/unlinks
        the contextual action depending on the ``menu`` attribute.

        :return: the database id of the loaded report action
        """
        res = {}
        # mandatory attributes, mapped xml-attribute -> field name
        for dest,f in (('name','string'),('model','model'),('report_name','name')):
            res[dest] = rec.get(f)
            assert res[dest], "Attribute %s of report is empty !" % (f,)
        # optional pass-through attributes
        for field, dest in (('attachment', 'attachment'),
                            ('attachment_use', 'attachment_use'),
                            ('usage', 'usage'),
                            ('file', 'report_file'),
                            ('report_type', 'report_type'),
                            ('parser', 'parser'),
                            ('print_report_name', 'print_report_name'),
                            ):
            if rec.get(field):
                res[dest] = rec.get(field)
        if rec.get('auto'):
            res['auto'] = safe_eval(rec.get('auto','False'))
        if rec.get('header'):
            res['header'] = safe_eval(rec.get('header','False'))

        res['multi'] = rec.get('multi') and safe_eval(rec.get('multi','False'))

        xml_id = rec.get('id','')
        self._test_xml_id(xml_id)
        warnings.warn(f"The <report> tag is deprecated, use a <record> tag for {xml_id!r}.", DeprecationWarning)

        if rec.get('groups'):
            g_names = rec.get('groups','').split(',')
            groups_value = []
            for group in g_names:
                # a leading '-' removes the group instead of adding it
                if group.startswith('-'):
                    group_id = self.id_get(group[1:])
                    groups_value.append(odoo.Command.unlink(group_id))
                else:
                    group_id = self.id_get(group)
                    groups_value.append(odoo.Command.link(group_id))
            res['groups_id'] = groups_value
        if rec.get('paperformat'):
            pf_name = rec.get('paperformat')
            pf_id = self.id_get(pf_name)
            res['paperformat_id'] = pf_id

        xid = self.make_xml_id(xml_id)
        data = dict(xml_id=xid, values=res, noupdate=self.noupdate)
        report = self.env['ir.actions.report']._load_records([data], self.mode == 'update')
        self.idref[xml_id] = report.id

        # menu defaults to True: create the contextual action unless
        # explicitly disabled
        if not rec.get('menu') or safe_eval(rec.get('menu','False')):
            report.create_action()
        elif self.mode=='update' and safe_eval(rec.get('menu','False'))==False:
            # Special check for report having attribute menu=False on update
            report.unlink_action()
        return report.id
||||
def _tag_function(self, rec):
|
||||
if self.noupdate and self.mode != 'init':
|
||||
return
|
||||
env = self.get_env(rec)
|
||||
_eval_xml(self, rec, env)
|
||||
|
||||
    def _tag_act_window(self, rec):
        """Process a deprecated ``<act_window>`` element.

        Builds an ``ir.actions.act_window`` values dict from the element's
        attributes (evaluating ``domain``/``context`` server-side when
        possible) and loads it through ``_load_records``.
        """
        name = rec.get('name')
        xml_id = rec.get('id','')
        self._test_xml_id(xml_id)
        warnings.warn(f"The <act_window> tag is deprecated, use a <record> for {xml_id!r}.", DeprecationWarning)
        view_id = False
        if rec.get('view_id'):
            view_id = self.id_get(rec.get('view_id'))
        domain = rec.get('domain') or '[]'
        res_model = rec.get('res_model')
        binding_model = rec.get('binding_model')
        view_mode = rec.get('view_mode') or 'tree,form'
        usage = rec.get('usage')
        limit = rec.get('limit')
        uid = self.env.user.id

        # Act_window's 'domain' and 'context' contain mostly literals
        # but they can also refer to the variables provided below
        # in eval_context, so we need to eval() them before storing.
        # Among the context variables, 'active_id' refers to
        # the currently selected items in a list view, and only
        # takes meaning at runtime on the client side. For this
        # reason it must remain a bare variable in domain and context,
        # even after eval() at server-side. We use the special 'unquote'
        # class to achieve this effect: a string which has itself, unquoted,
        # as representation.
        active_id = unquote("active_id")
        active_ids = unquote("active_ids")
        active_model = unquote("active_model")

        # Include all locals() in eval_context, for backwards compatibility
        eval_context = {
            'name': name,
            'xml_id': xml_id,
            'type': 'ir.actions.act_window',
            'view_id': view_id,
            'domain': domain,
            'res_model': res_model,
            'src_model': binding_model,
            'view_mode': view_mode,
            'usage': usage,
            'limit': limit,
            'uid': uid,
            'active_id': active_id,
            'active_ids': active_ids,
            'active_model': active_model,
        }
        context = self.get_env(rec, eval_context).context

        try:
            domain = safe_eval(domain, eval_context)
        except (ValueError, NameError):
            # Some domains contain references that are only valid at runtime at
            # client-side, so in that case we keep the original domain string
            # as it is. We also log it, just in case.
            _logger.debug('Domain value (%s) for element with id "%s" does not parse '\
                'at server-side, keeping original string, in case it\'s meant for client side only',
                domain, xml_id or 'n/a', exc_info=True)
        res = {
            'name': name,
            'type': 'ir.actions.act_window',
            'view_id': view_id,
            'domain': domain,
            'context': context,
            'res_model': res_model,
            'view_mode': view_mode,
            'usage': usage,
            'limit': limit,
        }

        if rec.get('groups'):
            g_names = rec.get('groups','').split(',')
            groups_value = []
            for group in g_names:
                # a leading '-' removes the group instead of adding it
                if group.startswith('-'):
                    group_id = self.id_get(group[1:])
                    groups_value.append(odoo.Command.unlink(group_id))
                else:
                    group_id = self.id_get(group)
                    groups_value.append(odoo.Command.link(group_id))
            res['groups_id'] = groups_value

        if rec.get('target'):
            res['target'] = rec.get('target','')
        if binding_model:
            # contextual action bound to another model
            res['binding_model_id'] = self.env['ir.model']._get(binding_model).id
            res['binding_type'] = rec.get('binding_type') or 'action'
            views = rec.get('binding_views')
            if views is not None:
                res['binding_view_types'] = views
        xid = self.make_xml_id(xml_id)
        data = dict(xml_id=xid, values=res, noupdate=self.noupdate)
        self.env['ir.actions.act_window']._load_records([data], self.mode == 'update')
||||
    def _tag_menuitem(self, rec, parent=None):
        """Process a ``<menuitem>`` element (and its nested children).

        :param rec: the lxml ``menuitem`` element
        :param parent: database id of the parent menu when called
            recursively for nested ``<menuitem>`` children
        """
        rec_id = rec.attrib["id"]
        self._test_xml_id(rec_id)

        # The parent attribute was specified, if non-empty determine its ID, otherwise
        # explicitly make a top-level menu
        values = {
            'parent_id': False,
            'active': nodeattr2bool(rec, 'active', default=True),
        }

        if rec.get('sequence'):
            values['sequence'] = int(rec.get('sequence'))

        if parent is not None:
            values['parent_id'] = parent
        elif rec.get('parent'):
            values['parent_id'] = self.id_get(rec.attrib['parent'])
        elif rec.get('web_icon'):
            # NOTE(review): web_icon is only honoured on top-level menus
            # (no parent/parent attr) here -- confirm this is intended
            values['web_icon'] = rec.attrib['web_icon']


        if rec.get('name'):
            values['name'] = rec.attrib['name']

        if rec.get('action'):
            a_action = rec.attrib['action']

            # determine the type of action
            if '.' not in a_action:
                a_action = '%s.%s' % (self.module, a_action)
            act = self.env.ref(a_action).sudo()
            values['action'] = "%s,%d" % (act.type, act.id)

            # fall back on the action's name when the menu has none
            if not values.get('name') and act.type.endswith(('act_window', 'wizard', 'url', 'client', 'server')) and act.name:
                values['name'] = act.name

        if not values.get('name'):
            values['name'] = rec_id or '?'


        groups = []
        for group in rec.get('groups', '').split(','):
            # a leading '-' removes the group instead of adding it
            if group.startswith('-'):
                group_id = self.id_get(group[1:])
                groups.append(odoo.Command.unlink(group_id))
            elif group:
                group_id = self.id_get(group)
                groups.append(odoo.Command.link(group_id))
        if groups:
            values['groups_id'] = groups


        data = {
            'xml_id': self.make_xml_id(rec_id),
            'values': values,
            'noupdate': self.noupdate,
        }
        menu = self.env['ir.ui.menu']._load_records([data], self.mode == 'update')
        # recurse into nested menu items, attaching them to this menu
        for child in rec.iterchildren('menuitem'):
            self._tag_menuitem(child, parent=menu.id)
||||
    def _tag_record(self, rec, extra_vals=None):
        """Process a ``<record>`` element: create or update a record.

        :param rec: the lxml ``record`` element
        :param extra_vals: extra field values merged into the record's
            values (used for nested one2many records, to inject the
            inverse field pointing at the parent)
        :return: ``(model_name, record_id)`` on success, ``None`` when the
            record is skipped
        """
        rec_model = rec.get("model")
        env = self.get_env(rec)
        rec_id = rec.get("id", '')

        model = env[rec_model]

        if self.xml_filename and rec_id:
            # let downstream code know where this record comes from
            model = model.with_context(
                install_module=self.module,
                install_filename=self.xml_filename,
                install_xmlid=rec_id,
            )

        self._test_xml_id(rec_id)
        xid = self.make_xml_id(rec_id)

        # in update mode, the record won't be updated if the data node explicitly
        # opt-out using @noupdate="1". A second check will be performed in
        # model._load_records() using the record's ir.model.data `noupdate` field.
        if self.noupdate and self.mode != 'init':
            # check if the xml record has no id, skip
            if not rec_id:
                return None

            record = env['ir.model.data']._load_xmlid(xid)
            # pre-resolve the xmlids of nested records so references work
            for child in rec.xpath('.//record[@id]'):
                sub_xid = child.get("id")
                self._test_xml_id(sub_xid)
                sub_xid = self.make_xml_id(sub_xid)
                sub_record = env['ir.model.data']._load_xmlid(sub_xid)
                if sub_record:
                    self.idref[sub_xid] = sub_record.id

            if record:
                # if the resource already exists, don't update it but store
                # its database id (can be useful)
                self.idref[rec_id] = record.id
                return None
            elif not nodeattr2bool(rec, 'forcecreate', True):
                # if it doesn't exist and we shouldn't create it, skip it
                return None
            # else create it normally

        if xid and xid.partition('.')[0] != self.module:
            # updating a record created by another module
            record = self.env['ir.model.data']._load_xmlid(xid)
            if not record:
                if self.noupdate and not nodeattr2bool(rec, 'forcecreate', True):
                    # if it doesn't exist and we shouldn't create it, skip it
                    return None
                raise Exception("Cannot update missing record %r" % xid)

        res = {}
        sub_records = []
        for field in rec.findall('./field'):
            #TODO: most of this code is duplicated above (in _eval_xml)...
            f_name = field.get("name")
            f_ref = field.get("ref")
            f_search = field.get("search")
            f_model = field.get("model")
            if not f_model and f_name in model._fields:
                f_model = model._fields[f_name].comodel_name
            f_use = field.get("use",'') or 'id'
            f_val = False

            if f_search:
                # field value obtained by searching the comodel
                idref2 = _get_idref(self, env, f_model, self.idref)
                q = safe_eval(f_search, idref2)
                assert f_model, 'Define an attribute model="..." in your .XML file !'
                # browse the objects searched
                s = env[f_model].search(q)
                # column definitions of the "local" object
                _fields = env[rec_model]._fields
                # if the current field is many2many
                if (f_name in _fields) and _fields[f_name].type == 'many2many':
                    f_val = [odoo.Command.set([x[f_use] for x in s])]
                elif len(s):
                    # otherwise (we are probably in a many2one field),
                    # take the first element of the search
                    f_val = s[0][f_use]
            elif f_ref:
                # field value obtained from another record's xmlid
                if f_name in model._fields and model._fields[f_name].type == 'reference':
                    val = self.model_id_get(f_ref)
                    f_val = val[0] + ',' + str(val[1])
                else:
                    f_val = self.id_get(f_ref, raise_if_not_found=nodeattr2bool(rec, 'forcecreate', True))
                    if not f_val:
                        _logger.warning("Skipping creation of %r because %s=%r could not be resolved", xid, f_name, f_ref)
                        return None
            else:
                # literal field value, coerced according to the field type
                f_val = _eval_xml(self, field, env)
                if f_name in model._fields:
                    field_type = model._fields[f_name].type
                    if field_type == 'many2one':
                        f_val = int(f_val) if f_val else False
                    elif field_type == 'integer':
                        f_val = int(f_val)
                    elif field_type in ('float', 'monetary'):
                        f_val = float(f_val)
                    elif field_type == 'boolean' and isinstance(f_val, str):
                        f_val = str2bool(f_val)
                    elif field_type == 'one2many':
                        # nested records are deferred until the parent exists
                        for child in field.findall('./record'):
                            sub_records.append((child, model._fields[f_name].inverse_name))
                        if isinstance(f_val, str):
                            # We do not want to write on the field since we will write
                            # on the childrens' parents later
                            continue
                    elif field_type == 'html':
                        if field.get('type') == 'xml':
                            _logger.warning('HTML field %r is declared as `type="xml"`', f_name)
            res[f_name] = f_val
        if extra_vals:
            res.update(extra_vals)

        data = dict(xml_id=xid, values=res, noupdate=self.noupdate)
        record = model._load_records([data], self.mode == 'update')
        if rec_id:
            self.idref[rec_id] = record.id
        if config.get('import_partial'):
            env.cr.commit()
        # now create the deferred one2many children, linked to this record
        for child_rec, inverse_name in sub_records:
            self._tag_record(child_rec, extra_vals={inverse_name: record.id})
        return rec_model, record.id
||||
    def _tag_template(self, el):
        """Process a ``<template>`` element.

        This helper transforms a <template> element into a <record> on
        ``ir.ui.view`` (or ``theme.ir.ui.view`` for theme modules) and
        forwards it to :meth:`_tag_record`.
        """
        tpl_id = el.get('id', el.get('t-name'))
        full_tpl_id = tpl_id
        if '.' not in full_tpl_id:
            full_tpl_id = '%s.%s' % (self.module, tpl_id)
        # set the full template name for qweb <module>.<id>
        if not el.get('inherit_id'):
            el.set('t-name', full_tpl_id)
            el.tag = 't'
        else:
            el.tag = 'data'
        el.attrib.pop('id', None)

        if self.module.startswith('theme_'):
            model = 'theme.ir.ui.view'
        else:
            model = 'ir.ui.view'

        record_attrs = {
            'id': tpl_id,
            'model': model,
        }
        # these attributes belong on the generated <record>, not the arch
        for att in ['forcecreate', 'context']:
            if att in el.attrib:
                record_attrs[att] = el.attrib.pop(att)

        Field = builder.E.field
        name = el.get('name', tpl_id)

        record = etree.Element('record', attrib=record_attrs)
        record.append(Field(name, name='name'))
        record.append(Field(full_tpl_id, name='key'))
        record.append(Field("qweb", name='type'))
        if 'track' in el.attrib:
            record.append(Field(el.get('track'), name='track'))
        if 'priority' in el.attrib:
            record.append(Field(el.get('priority'), name='priority'))
        if 'inherit_id' in el.attrib:
            record.append(Field(name='inherit_id', ref=el.get('inherit_id')))
        if 'website_id' in el.attrib:
            record.append(Field(name='website_id', ref=el.get('website_id')))
        if 'key' in el.attrib:
            # an explicit key overrides the default one appended above
            record.append(Field(el.get('key'), name='key'))
        if el.get('active') in ("True", "False"):
            view_id = self.id_get(tpl_id, raise_if_not_found=False)
            # don't toggle 'active' on an existing view during an update
            if self.mode != "update" or not view_id:
                record.append(Field(name='active', eval=el.get('active')))
        if el.get('customize_show') in ("True", "False"):
            record.append(Field(name='customize_show', eval=el.get('customize_show')))
        groups = el.attrib.pop('groups', None)
        if groups:
            grp_lst = [("ref('%s')" % x) for x in groups.split(',')]
            record.append(Field(name="groups_id", eval="[Command.set(["+', '.join(grp_lst)+"])]"))
        if el.get('primary') == 'True':
            # Pseudo clone mode, we'll set the t-name to the full canonical xmlid
            el.append(
                builder.E.xpath(
                    builder.E.attribute(full_tpl_id, name='t-name'),
                    expr=".",
                    position="attributes",
                )
            )
            record.append(Field('primary', name='mode'))
        # inject complete <template> element (after changing node name) into
        # the ``arch`` field
        record.append(Field(el, name="arch", type="xml"))

        return self._tag_record(record)
||||
def id_get(self, id_str, raise_if_not_found=True):
|
||||
if id_str in self.idref:
|
||||
return self.idref[id_str]
|
||||
res = self.model_id_get(id_str, raise_if_not_found)
|
||||
return res and res[1]
|
||||
|
||||
def model_id_get(self, id_str, raise_if_not_found=True):
|
||||
if '.' not in id_str:
|
||||
id_str = '%s.%s' % (self.module, id_str)
|
||||
return self.env['ir.model.data']._xmlid_to_res_model_res_id(id_str, raise_if_not_found=raise_if_not_found)
|
||||
|
||||
    def _tag_root(self, el):
        """Dispatch every known child element of a data root to its handler.

        For each child, the environment and noupdate flag derived from the
        root element are pushed for the duration of the handler call, and
        any failure is wrapped into a :class:`ParseError` carrying file and
        line information.
        """
        for rec in el:
            f = self._tags.get(rec.tag)
            if f is None:
                # unknown tags (comments, etc.) are silently ignored
                continue

            self.envs.append(self.get_env(el))
            self._noupdate.append(nodeattr2bool(el, 'noupdate', self.noupdate))
            try:
                f(rec)
            except ParseError:
                # already wrapped by a nested call: propagate as-is
                raise
            except ValidationError as err:
                msg = "while parsing {file}:{viewline}\n{err}\n\nView error context:\n{context}\n".format(
                    file=rec.getroottree().docinfo.URL,
                    viewline=rec.sourceline,
                    context=pprint.pformat(getattr(err, 'context', None) or '-no context-'),
                    err=err.args[0],
                )
                _logger.debug(msg, exc_info=True)
                raise ParseError(msg) from None  # Restart with "--log-handler odoo.tools.convert:DEBUG" for complete traceback
            except Exception as e:
                raise ParseError('while parsing %s:%s, somewhere inside\n%s' % (
                    rec.getroottree().docinfo.URL,
                    rec.sourceline,
                    etree.tostring(rec, encoding='unicode').rstrip()
                )) from e
            finally:
                # always restore the previous env/noupdate state
                self._noupdate.pop()
                self.envs.pop()
||||
    @property
    def env(self):
        """Environment currently in effect: top of the stack maintained by ``_tag_root``."""
        return self.envs[-1]
||||
    @property
    def noupdate(self):
        """Noupdate flag currently in effect: top of the stack maintained by ``_tag_root``."""
        return self._noupdate[-1]
||||
    def __init__(self, cr, module, idref, mode, noupdate=False, xml_filename=None):
        """Prepare an XML data importer.

        :param cr: database cursor
        :param module: technical name of the module being loaded
        :param idref: shared mapping of already-resolved external ids
            (``None`` creates a fresh one)
        :param mode: 'init' on first installation, 'update' on upgrade
        :param noupdate: default noupdate flag for the created records
        :param xml_filename: path of the XML file being imported, if known
        """
        self.mode = mode
        self.module = module
        # stack of environments; get_env()/_tag_root() push derived ones
        self.envs = [odoo.api.Environment(cr, SUPERUSER_ID, {})]
        self.idref = {} if idref is None else idref
        # stack of noupdate flags paralleling self.envs
        self._noupdate = [noupdate]
        self.xml_filename = xml_filename
        # dispatch table: element tag -> handler method
        self._tags = {
            'record': self._tag_record,
            'delete': self._tag_delete,
            'function': self._tag_function,
            'menuitem': self._tag_menuitem,
            'template': self._tag_template,
            'report': self._tag_report,
            'act_window': self._tag_act_window,

            # <odoo>/<data>/<openerp> containers recurse via _tag_root
            **dict.fromkeys(self.DATA_ROOTS, self._tag_root)
        }
||||
    def parse(self, de):
        """Import the parsed XML tree whose root element is *de*.

        :raises AssertionError: when the root tag is not a supported
            data root (see ``DATA_ROOTS``)
        """
        assert de.tag in self.DATA_ROOTS, "Root xml tag must be <openerp>, <odoo> or <data>."
        self._tag_root(de)
    # accepted root tags for a data file
    DATA_ROOTS = ['odoo', 'data', 'openerp']
|
||||
def convert_file(cr, module, filename, idref, mode='update', noupdate=False, kind=None, pathname=None):
    """Import a data file of *module* into the database.

    Dispatches on the file extension: ``.csv``, ``.sql`` and ``.xml`` are
    imported, ``.js`` is accepted but ignored, anything else is rejected.

    :param cr: database cursor
    :param module: technical name of the module owning the file
    :param filename: path of the file, relative to the module
    :param idref: shared mapping of already-resolved external ids
    :param mode: 'init' on first installation, 'update' on upgrade
    :param noupdate: default noupdate flag for the created records
    :param kind: unused, kept for backward compatibility of the signature
    :param pathname: actual path used to open the file; defaults to
        ``module/filename``
    :raises ValueError: when the file extension is not supported
    """
    if pathname is None:
        pathname = os.path.join(module, filename)
    ext = os.path.splitext(filename)[1].lower()

    with file_open(pathname, 'rb') as fp:
        if ext == '.csv':
            convert_csv_import(cr, module, pathname, fp.read(), idref, mode, noupdate)
        elif ext == '.sql':
            convert_sql_import(cr, fp)
        elif ext == '.xml':
            convert_xml_import(cr, module, fp, idref, mode, noupdate)
        elif ext == '.js':
            pass  # .js files are valid but ignored here.
        else:
            # BUGFIX: the original passed the filename as a second exception
            # argument (logging-style comma), so the message was never
            # interpolated; interpolate it explicitly instead.
            raise ValueError("Can't load unknown file type %s." % filename)
||||
def convert_sql_import(cr, fp):
    """Execute the raw SQL contained in file object *fp* on cursor *cr*."""
    sql = fp.read()
    cr.execute(sql)  # pylint: disable=sql-injection
||||
def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
        noupdate=False):
    '''Import csv file :
        quote: "
        delimiter: ,
        encoding: utf-8

    The model is derived from the file name (part before the first '-'),
    the first row gives the field names, and the remaining non-empty rows
    are loaded through ``model.load()``.
    '''
    filename, _ext = os.path.splitext(os.path.basename(fname))
    model = filename.split('-')[0]
    reader = pycompat.csv_reader(io.BytesIO(csvcontent), quotechar='"', delimiter=',')
    fields = next(reader)

    # outside 'init' mode an 'id' column is mandatory
    # NOTE(review): the log message below says "we are in init mode" but the
    # condition actually triggers when mode != 'init' -- message looks wrong
    if not (mode == 'init' or 'id' in fields):
        _logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
        return

    # filter out empty lines (any([]) == False) and lines containing only empty cells
    datas = [
        line for line in reader
        if any(line)
    ]

    context = {
        'mode': mode,
        'module': module,
        'install_module': module,
        'install_filename': fname,
        'noupdate': noupdate,
    }
    env = odoo.api.Environment(cr, SUPERUSER_ID, context)
    result = env[model].load(fields, datas)
    if any(msg['type'] == 'error' for msg in result['messages']):
        # Report failed import and abort module install
        warning_msg = "\n".join(msg['message'] for msg in result['messages'])
        raise Exception(_('Module loading %s failed: file %s could not be processed:\n %s') % (module, fname, warning_msg))
|
||||
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
    """Validate *xmlfile* against the RelaxNG schema and import its data.

    :param cr: database cursor
    :param module: technical name of the module owning the file
    :param xmlfile: file path (str) or open file object
    :param idref: shared mapping of already-resolved external ids
    :param mode: 'init' on first installation, 'update' on upgrade
    :param noupdate: default noupdate flag for the created records
    :param report: unused, kept for backward compatibility of the signature
    :raises lxml.etree.DocumentInvalid: when the file does not match the schema
    """
    doc = etree.parse(xmlfile)
    schema = os.path.join(config['root_path'], 'import_xml.rng')
    relaxng = etree.RelaxNG(etree.parse(schema))
    try:
        relaxng.assert_(doc)
    except Exception:
        _logger.exception("The XML file '%s' does not fit the required schema !", xmlfile.name)
        if jingtrang:
            # jing produces far more readable validation messages
            p = subprocess.run(['pyjing', schema, xmlfile.name], stdout=subprocess.PIPE)
            _logger.warning(p.stdout.decode())
        else:
            for e in relaxng.error_log:
                _logger.warning(e)
            _logger.info("Install 'jingtrang' for more precise and useful validation messages.")
        raise

    if isinstance(xmlfile, str):
        xml_filename = xmlfile
    else:
        xml_filename = xmlfile.name
    obj = xml_import(cr, module, idref, mode, noupdate=noupdate, xml_filename=xml_filename)
    obj.parse(doc.getroot())
||||
BIN
odoo-bringout-oca-ocb-base/odoo/tools/data/files/sRGB2014.icc
Normal file
BIN
odoo-bringout-oca-ocb-base/odoo/tools/data/files/sRGB2014.icc
Normal file
Binary file not shown.
|
|
@ -0,0 +1,5 @@
|
|||
Copyright (c) 2015 International Color Consortium
|
||||
|
||||
This profile is made available by the International Color Consortium, and may be copied, distributed, embedded, made,
|
||||
used, and sold without restriction. Altered versions of this profile shall have the original identification and
|
||||
copyright information removed and shall not be misrepresented as the original profile.
|
||||
301
odoo-bringout-oca-ocb-base/odoo/tools/date_utils.py
Normal file
301
odoo-bringout-oca-ocb-base/odoo/tools/date_utils.py
Normal file
|
|
@ -0,0 +1,301 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
import math
|
||||
import calendar
|
||||
from datetime import date, datetime, time
|
||||
from typing import Tuple
|
||||
|
||||
import babel
|
||||
import pytz
|
||||
from dateutil.relativedelta import relativedelta, weekdays
|
||||
|
||||
from .func import lazy
|
||||
from odoo.loglevels import ustr
|
||||
|
||||
|
||||
def date_type(value):
    """Return the ``datetime`` class if *value* is a datetime, else ``date``.

    :param value: A datetime.datetime or datetime.date object.
    :return: the ``datetime.datetime`` or ``datetime.date`` class
    """
    if isinstance(value, datetime):
        return datetime
    return date
|
||||
|
||||
def get_month(date):
    """Return the first and last day of the month *date* falls in.

    :param date: A datetime.datetime or datetime.date object.
    :return: A tuple (date_from, date_to) having the same object type as
        the 'date' parameter.
    """
    cls = date_type(date)
    last_day = calendar.monthrange(date.year, date.month)[1]
    return cls(date.year, date.month, 1), cls(date.year, date.month, last_day)
|
||||
|
||||
def get_quarter_number(date):
    """Return the quarter [1-4] that *date* belongs to.

    :param date: A datetime.datetime or datetime.date object.
    :return: A [1-4] integer.
    """
    # equivalent to math.ceil(month / 3) for months 1..12
    return (date.month + 2) // 3
|
||||
|
||||
def get_quarter(date):
    """Return the first and last day of the quarter *date* falls in.

    :param date: A datetime.datetime or datetime.date object.
    :return: A tuple (date_from, date_to) having the same object type as
        the 'date' parameter.
    """
    first_month = 3 * (get_quarter_number(date) - 1) + 1
    start = date_type(date)(date.year, first_month, 1)
    # two months after the quarter start, clamped to that month's last day
    end = start + relativedelta(months=2)
    end = end.replace(day=calendar.monthrange(end.year, end.month)[1])
    return start, end
||||
|
||||
|
||||
def get_fiscal_year(date, day=31, month=12):
    ''' Compute the fiscal year dates range on which the 'date' parameter belongs to.
    A fiscal year is the period used by governments for accounting purposes and vary between countries.
    By default, calling this method with only one parameter gives the calendar year because the ending date of the
    fiscal year is set to the YYYY-12-31.
    :param date: A datetime.datetime or datetime.date object.
    :param day: The day of month the fiscal year ends.
    :param month: The month of year the fiscal year ends.
    :return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
    '''

    def fix_day(year, month, day):
        # Clamp `day` to the last day of `month` in `year`; February 28th
        # is promoted to the 29th on leap years so the boundary always
        # lands on the true end of February.
        max_day = calendar.monthrange(year, month)[1]
        if month == 2 and day in (28, max_day):
            return max_day
        return min(day, max_day)

    day = fix_day(date.year, month, day)
    date_to = date_type(date)(date.year, month, day)

    # when `date` is on or before this year's end date, the fiscal year
    # started just after the previous year's end date; otherwise it runs
    # into next year
    if date <= date_to:
        date_from = date_to - relativedelta(years=1)
        day = fix_day(date_from.year, date_from.month, date_from.day)
        date_from = date_type(date)(date_from.year, date_from.month, day)
        date_from += relativedelta(days=1)
    else:
        date_from = date_to + relativedelta(days=1)
        date_to = date_to + relativedelta(years=1)
        day = fix_day(date_to.year, date_to.month, date_to.day)
        date_to = date_type(date)(date_to.year, date_to.month, day)
    return date_from, date_to
||||
|
||||
|
||||
def get_timedelta(qty, granularity):
    """
    Helper to get a `relativedelta` object for the given quantity and interval unit.

    :param qty: the number of unit to apply on the timedelta to return
    :param granularity: type of period in string, can be 'hour', 'day',
        'week', 'month', 'quarter' or 'year'
    :return: the corresponding ``dateutil.relativedelta.relativedelta``
    :raises KeyError: if *granularity* is not one of the supported units
    """
    switch = {
        'hour': relativedelta(hours=qty),
        'day': relativedelta(days=qty),
        'week': relativedelta(weeks=qty),
        'month': relativedelta(months=qty),
        # BUGFIX: the docstring always advertised 'quarter' but the mapping
        # lacked it, so the documented unit raised KeyError. A quarter is
        # three months.
        'quarter': relativedelta(months=3 * qty),
        'year': relativedelta(years=qty),
    }
    return switch[granularity]
||||
|
||||
|
||||
def start_of(value, granularity):
    """
    Get start of a time period from a date or a datetime.

    :param value: initial date or datetime.
    :param granularity: type of period in string, can be year, quarter, month, week, day or hour.
    :return: a date/datetime object corresponding to the start of the specified period.
    :raises ValueError: if *granularity* is not a supported period
        ('hour' is only valid for datetime values).
    """
    is_datetime = isinstance(value, datetime)
    if granularity == "year":
        result = value.replace(month=1, day=1)
    elif granularity == "quarter":
        # Q1 = Jan 1st
        # Q2 = Apr 1st
        # Q3 = Jul 1st
        # Q4 = Oct 1st
        result = get_quarter(value)[0]
    elif granularity == "month":
        result = value.replace(day=1)
    elif granularity == 'week':
        # `calendar.weekday` uses ISO8601 for start of week reference, this means that
        # by default MONDAY is the first day of the week and SUNDAY is the last.
        result = value - relativedelta(days=calendar.weekday(value.year, value.month, value.day))
    elif granularity == "day":
        result = value
    elif granularity == "hour" and is_datetime:
        # truncate minutes/seconds/microseconds, keep the hour
        return datetime.combine(value, time.min).replace(hour=value.hour)
    elif is_datetime:
        raise ValueError(
            "Granularity must be year, quarter, month, week, day or hour for value %s" % value
        )
    else:
        raise ValueError(
            "Granularity must be year, quarter, month, week or day for value %s" % value
        )

    # for datetimes, also reset the time part to midnight
    return datetime.combine(result, time.min) if is_datetime else result
||||
|
||||
|
||||
def end_of(value, granularity):
    """
    Get end of a time period from a date or a datetime.

    :param value: initial date or datetime.
    :param granularity: Type of period in string, can be year, quarter, month, week, day or hour.
    :return: a date/datetime object corresponding to the end of the specified period.
    :raise ValueError: if ``granularity`` is not a supported period name.
    """
    has_time = isinstance(value, datetime)

    if granularity == "hour" and has_time:
        # Keep the hour, push minutes/seconds/microseconds to their maximum.
        return datetime.combine(value, time.max).replace(hour=value.hour)

    if granularity == "year":
        bounded = value.replace(month=12, day=31)
    elif granularity == "quarter":
        # Last day of the quarter: Mar 31 / Jun 30 / Sep 30 / Dec 31.
        bounded = get_quarter(value)[1]
    elif granularity == "month":
        # Go to the first day of next month, then step back one day.
        bounded = value + relativedelta(day=1, months=1, days=-1)
    elif granularity == 'week':
        # `calendar.weekday` follows ISO 8601: Monday is the first day
        # of the week, Sunday the last.
        bounded = value + relativedelta(days=6 - calendar.weekday(value.year, value.month, value.day))
    elif granularity == "day":
        bounded = value
    elif has_time:
        raise ValueError(
            "Granularity must be year, quarter, month, week, day or hour for value %s" % value
        )
    else:
        raise ValueError(
            "Granularity must be year, quarter, month, week or day for value %s" % value
        )

    return datetime.combine(bounded, time.max) if has_time else bounded
|
||||
|
||||
|
||||
def add(value, *args, **kwargs):
    """
    Return the sum of ``value`` and a :class:`relativedelta`.

    :param value: initial date or datetime.
    :param args: positional args to pass directly to :class:`relativedelta`.
    :param kwargs: keyword args to pass directly to :class:`relativedelta`.
    :return: the resulting date/datetime.
    """
    delta = relativedelta(*args, **kwargs)
    return value + delta
|
||||
|
||||
|
||||
def subtract(value, *args, **kwargs):
    """
    Return the difference between ``value`` and a :class:`relativedelta`.

    :param value: initial date or datetime.
    :param args: positional args to pass directly to :class:`relativedelta`.
    :param kwargs: keyword args to pass directly to :class:`relativedelta`.
    :return: the resulting date/datetime.
    """
    delta = relativedelta(*args, **kwargs)
    return value - delta
|
||||
|
||||
def json_default(obj):
    """
    Properly serializes date and datetime objects.

    Intended as the ``default=`` hook for ``json.dumps``: dates/datetimes
    are formatted through ``odoo.fields``, lazy proxies are unwrapped, and
    anything else falls back to its unicode representation.
    """
    from odoo import fields
    # Order matters: datetime is a subclass of date, so test it first.
    if isinstance(obj, datetime):
        serialized = fields.Datetime.to_string(obj)
    elif isinstance(obj, date):
        serialized = fields.Date.to_string(obj)
    elif isinstance(obj, lazy):
        # Unwrap the memoized value of a lazy proxy.
        serialized = obj._value
    else:
        serialized = ustr(obj)
    return serialized
|
||||
|
||||
|
||||
def date_range(start, end, step=relativedelta(months=1)):
    """Date range generator with a step interval.

    :param datetime start: beginning date of the range.
    :param datetime end: ending date of the range.
    :param relativedelta step: interval of the range.
    :return: a range of datetime from start to end.
    :rtype: Iterator[datetime]
    :raise ValueError: if the timezones of ``start`` and ``end`` are
        inconsistent, if ``start > end``, or if ``step`` does not advance
        the start date.
    """

    are_naive = start.tzinfo is None and end.tzinfo is None
    are_utc = start.tzinfo == pytz.utc and end.tzinfo == pytz.utc

    # Mixed non-UTC timezones are more complex because of DST: arithmetic
    # is done on naive datetimes and each result is localized back below.
    are_others = start.tzinfo and end.tzinfo and not are_utc

    if are_others:
        # NOTE(review): `.zone` is pytz-specific — assumes both tzinfo
        # objects come from pytz.
        if start.tzinfo.zone != end.tzinfo.zone:
            raise ValueError("Timezones of start argument and end argument seem inconsistent")

    if not are_naive and not are_utc and not are_others:
        # One bound is naive while the other is timezone-aware.
        raise ValueError("Timezones of start argument and end argument mismatch")

    if start > end:
        raise ValueError("start > end, start date must be before end")

    if start == start + step:
        # A zero step would make the loop below run forever.
        raise ValueError("Looks like step is null")

    if start.tzinfo:
        # Re-localize after each naive addition so DST offsets are applied
        # per-result rather than carried over from the start date.
        localize = start.tzinfo.localize
    else:
        localize = lambda dt: dt

    dt = start.replace(tzinfo=None)
    end = end.replace(tzinfo=None)
    while dt <= end:
        yield localize(dt)
        dt = dt + step
|
||||
|
||||
|
||||
def weeknumber(locale: babel.Locale, date: date) -> Tuple[int, int]:
    """Computes the year and weeknumber of `date`. The week number is 1-indexed
    (so the first week is week number 1).

    For ISO locales (first day of week = monday, min week days = 4) the concept
    is clear and the Python stdlib implements it directly.

    For other locales, it's basically nonsensical as there is no actual
    definition. For now we will implement non-split first-day-of-year, that is
    the first week of the year is the one which contains the first day of the
    year (taking first day of week in account), and the days of the previous
    year which are part of that week are considered to be in the next year for
    calendaring purposes.

    That is December 27, 2015 is in the first week of 2016.

    An alternative is to split the week in two, so the week from December 27,
    2015 to January 2, 2016 would be *both* W53/2015 and W01/2016.

    :return: a ``(year, week_number)`` pair.
    """
    if locale.first_week_day == 0 and locale.min_week_days == 4:
        # woohoo nothing to do: this is exactly ISO week numbering.
        return date.isocalendar()[:2]

    # first find the first day of the first week of the next year, if the
    # reference date is after that then it must be in the first week of the next
    # year, remove this if we decide to implement split weeks instead
    fdny = date.replace(year=date.year + 1, month=1, day=1) \
         - relativedelta(weekday=weekdays[locale.first_week_day](-1))
    if date >= fdny:
        return date.year + 1, 1

    # otherwise get the number of periods of 7 days between the first day of the
    # first week and the reference
    fdow = date.replace(month=1, day=1) \
         - relativedelta(weekday=weekdays[locale.first_week_day](-1))
    doy = (date - fdow).days

    return date.year, (doy // 7 + 1)
|
||||
123
odoo-bringout-oca-ocb-base/odoo/tools/facade.py
Normal file
123
odoo-bringout-oca-ocb-base/odoo/tools/facade.py
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
import functools
|
||||
import inspect
|
||||
|
||||
|
||||
class ProxyAttr:
    """
    Descriptor class for wrapping attributes of the wrapped instance.

    Used with the `Proxy` class: at class-creation time (``__set_name__``)
    it replaces itself with a plain ``property`` whose getter and setter
    forward to ``self._wrapped__.<name>``, optionally casting non-None
    values read back through ``cast``.
    """
    def __init__(self, cast=False):
        self._cast__ = cast

    def __set_name__(self, owner, name):
        cast = self._cast__

        def read(proxy):
            raw = getattr(proxy._wrapped__, name)
            # None passes through uncast so optional values stay None.
            if cast and raw is not None:
                return cast(raw)
            return raw

        def write(proxy, value):
            return setattr(proxy._wrapped__, name, value)

        setattr(owner, name, property(read, write))
|
||||
|
||||
|
||||
class ProxyFunc:
    """
    Descriptor class for wrapping functions of the wrapped instance.

    Used with the `Proxy` class, this class is used to set exposed functions of the wrapped instance
    while also allowing optional type casting on return values.

    ``cast`` semantics: a truthy callable casts non-None return values,
    ``None`` discards the return value entirely, and ``False`` (the
    default) passes the return value through unchanged.
    """
    def __init__(self, cast=False):
        self._cast__ = cast

    def __set_name__(self, owner, name):
        # `func` is looked up on the wrapped *class*: plain methods come
        # back unbound, static/class methods come back already resolved.
        # `getattr_static` exposes the raw descriptor so we can tell the
        # three cases apart.
        func = getattr(owner._wrapped__, name)
        descriptor = inspect.getattr_static(owner._wrapped__, name)
        cast = self._cast__

        if isinstance(descriptor, staticmethod):
            if cast:
                def wrap_func(*args, **kwargs):
                    result = func(*args, **kwargs)
                    return cast(result) if result is not None else None
            elif cast is None:
                # cast=None: call for side effects only, drop the result.
                def wrap_func(*args, **kwargs):
                    func(*args, **kwargs)
            else:
                def wrap_func(*args, **kwargs):
                    return func(*args, **kwargs)

            functools.update_wrapper(wrap_func, func)
            wrap_func = staticmethod(wrap_func)

        elif isinstance(descriptor, classmethod):
            # `func` is already bound to the wrapped class, so the proxy's
            # own `cls` argument is intentionally unused.
            if cast:
                def wrap_func(cls, *args, **kwargs):
                    result = func(*args, **kwargs)
                    return cast(result) if result is not None else None
            elif cast is None:
                def wrap_func(cls, *args, **kwargs):
                    func(*args, **kwargs)
            else:
                def wrap_func(cls, *args, **kwargs):
                    return func(*args, **kwargs)

            functools.update_wrapper(wrap_func, func)
            wrap_func = classmethod(wrap_func)

        else:
            # Plain (instance) method: call it on the wrapped instance.
            if cast:
                def wrap_func(self, *args, **kwargs):
                    result = func(self._wrapped__, *args, **kwargs)
                    return cast(result) if result is not None else None
            elif cast is None:
                def wrap_func(self, *args, **kwargs):
                    func(self._wrapped__, *args, **kwargs)
            else:
                def wrap_func(self, *args, **kwargs):
                    return func(self._wrapped__, *args, **kwargs)

            functools.update_wrapper(wrap_func, func)

        setattr(owner, name, wrap_func)
|
||||
|
||||
|
||||
class ProxyMeta(type):
    """Metaclass for :class:`Proxy` subclasses.

    Automatically proxies ``__repr__``/``__str__`` (unless the subclass
    defines its own) and copies the wrapped class' metadata (docstring,
    name, ...) onto the proxy class.
    """
    def __new__(cls, clsname, bases, attrs):
        # Delegate __repr__/__str__ to the wrapped instance unless overridden.
        attrs.update({func: ProxyFunc() for func in ("__repr__", "__str__") if func not in attrs})
        proxy_class = super().__new__(cls, clsname, bases, attrs)
        # To preserve the docstring, signature, code of the wrapped class.
        # `updated` is set to an empty list so it doesn't copy the `__dict__`.
        # See `functools.WRAPPER_ASSIGNMENTS` and `functools.WRAPPER_UPDATES`
        functools.update_wrapper(proxy_class, proxy_class._wrapped__, updated=[])
        return proxy_class
|
||||
|
||||
|
||||
class Proxy(metaclass=ProxyMeta):
    """
    A proxy class implementing the Facade pattern.

    This class delegates to an underlying instance while exposing a curated subset of its attributes and methods.
    Useful for controlling access, simplifying interfaces, or adding cross-cutting concerns.

    Subclasses set ``_wrapped__`` to the class being proxied and declare
    the exposed members with :class:`ProxyAttr` / :class:`ProxyFunc`.
    """
    # Class being wrapped; meant to be overridden by subclasses.
    _wrapped__ = object

    def __init__(self, instance):
        """
        Initializes the proxy by setting the wrapped instance.

        :param instance: The instance of the class to be wrapped.
        """
        # object.__setattr__ bypasses any __setattr__ a subclass may
        # define or proxy, so the wrapped reference is always stored.
        object.__setattr__(self, "_wrapped__", instance)

    @property
    def __class__(self):
        # Report the wrapped class as our own, so isinstance(proxy,
        # WrappedClass) holds for proxy instances.
        return type(self)._wrapped__
|
||||
282
odoo-bringout-oca-ocb-base/odoo/tools/float_utils.py
Normal file
282
odoo-bringout-oca-ocb-base/odoo/tools/float_utils.py
Normal file
|
|
@ -0,0 +1,282 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
from __future__ import print_function
|
||||
import builtins
|
||||
import math
|
||||
|
||||
|
||||
def round(f):
    """Round ``f`` half away from zero and return a float (Python 2
    semantics), unlike Python 3's builtin round.
    """
    # P3's builtin round differs from P2 in the following manner:
    # * it rounds half to even rather than up (away from 0)
    # * round(-0.) loses the sign (it returns -0 rather than 0)
    # * round(x) returns an int rather than a float
    #
    # this compatibility shim implements Python 2's round in terms of
    # Python 3's so that important rounding error under P3 can be
    # trivially fixed, assuming the P2 behaviour to be debugged and
    # correct.
    roundf = builtins.round(f)
    # If rounding f and f+1 doesn't differ by exactly 1, f was a .5 tie
    # that banker's rounding pulled towards even: push it away from 0.
    if builtins.round(f + 1) - roundf != 1:
        return f + math.copysign(0.5, f)
    # copysign ensures round(-0.) -> -0 *and* result is a float
    return math.copysign(roundf, f)
|
||||
|
||||
def _float_check_precision(precision_digits=None, precision_rounding=None):
|
||||
assert (precision_digits is not None or precision_rounding is not None) and \
|
||||
not (precision_digits and precision_rounding),\
|
||||
"exactly one of precision_digits and precision_rounding must be specified"
|
||||
assert precision_rounding is None or precision_rounding > 0,\
|
||||
"precision_rounding must be positive, got %s" % precision_rounding
|
||||
if precision_digits is not None:
|
||||
return 10 ** -precision_digits
|
||||
return precision_rounding
|
||||
|
||||
def float_round(value, precision_digits=None, precision_rounding=None, rounding_method='HALF-UP'):
    """Return ``value`` rounded to ``precision_digits`` decimal digits,
    minimizing IEEE-754 floating point representation errors, and applying
    the tie-breaking rule selected with ``rounding_method``, by default
    HALF-UP (away from zero).
    Precision must be given by ``precision_digits`` or ``precision_rounding``,
    not both!

    :param float value: the value to round
    :param int precision_digits: number of fractional digits to round to.
    :param float precision_rounding: decimal number representing the minimum
        non-zero value at the desired precision (for example, 0.01 for a
        2-digit precision).
    :param rounding_method: the rounding method used: 'HALF-UP', 'UP' or 'DOWN',
        the first one rounding up to the closest number with the rule that
        number>=0.5 is rounded up to 1, the second always rounding up and the
        latest one always rounding down.
    :return: rounded float
    """
    rounding_factor = _float_check_precision(precision_digits=precision_digits,
                                             precision_rounding=precision_rounding)
    if rounding_factor == 0 or value == 0:
        return 0.0

    # NORMALIZE - ROUND - DENORMALIZE
    # In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
    # we normalize the value before rounding it as an integer, and de-normalize
    # after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
    # Due to IEEE-754 float/double representation limits, the approximation of the
    # real value may be slightly below the tie limit, resulting in an error of
    # 1 unit in the last place (ulp) after rounding.
    # For example 2.675 == 2.6749999999999998.
    # To correct this, we add a very small epsilon value, scaled to the
    # the order of magnitude of the value, to tip the tie-break in the right
    # direction.
    # Credit: discussion with OpenERP community members on bug 882036

    normalized_value = value / rounding_factor  # normalize
    sign = math.copysign(1.0, normalized_value)
    # 52 fractional mantissa bits in an IEEE-754 double: epsilon is about
    # one ulp of the normalized value.
    epsilon_magnitude = math.log(abs(normalized_value), 2)
    epsilon = 2**(epsilon_magnitude-52)

    # TIE-BREAKING: UP/DOWN (for ceiling[resp. flooring] operations)
    # When rounding the value up[resp. down], we instead subtract[resp. add] the epsilon value
    # as the approximation of the real value may be slightly *above* the
    # tie limit, this would result in incorrectly rounding up[resp. down] to the next number
    # The math.ceil[resp. math.floor] operation is applied on the absolute value in order to
    # round "away from zero" and not "towards infinity", then the sign is
    # restored.

    if rounding_method == 'UP':
        normalized_value -= sign*epsilon
        rounded_value = math.ceil(abs(normalized_value)) * sign

    elif rounding_method == 'DOWN':
        normalized_value += sign*epsilon
        rounded_value = math.floor(abs(normalized_value)) * sign

    # TIE-BREAKING: HALF-UP (for normal rounding)
    # We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
    else:
        normalized_value += math.copysign(epsilon, normalized_value)
        rounded_value = round(normalized_value)  # round to integer

    result = rounded_value * rounding_factor  # de-normalize
    return result
|
||||
|
||||
def float_is_zero(value, precision_digits=None, precision_rounding=None):
    """Tell whether ``value`` rounds to zero at the given precision.

    The precision (``10**-precision_digits`` or ``precision_rounding``)
    acts as the zero *epsilon*: values whose rounded absolute value is
    smaller than it are considered zero.
    Precision must be given by ``precision_digits`` or ``precision_rounding``,
    not both!

    Warning: ``float_is_zero(value1-value2)`` is not equivalent to
    ``float_compare(value1,value2) == 0``, as the former will round after
    computing the difference, while the latter will round before, giving
    different results for e.g. 0.006 and 0.002 at 2 digits precision.

    :param float value: value to compare with the precision's zero
    :param int precision_digits: number of fractional digits to round to.
    :param float precision_rounding: decimal number representing the minimum
        non-zero value at the desired precision (for example, 0.01 for a
        2-digit precision).
    :return: True if ``value`` is considered zero
    """
    epsilon = _float_check_precision(precision_digits=precision_digits,
                                     precision_rounding=precision_rounding)
    rounded = float_round(value, precision_rounding=epsilon)
    return abs(rounded) < epsilon
|
||||
|
||||
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
    """Compare two floats after rounding them to the given precision.

    Both values are rounded *before* being compared: they are equal as soon
    as they round to the same number, which is not the same as their
    difference rounding to zero. E.g. 0.006 and 0.002 round to 0.01 and 0.0
    at 2 digits and compare as different (this method returns 1), even
    though 0.006-0.002 = 0.004 would be considered zero at that precision.
    Precision must be given by ``precision_digits`` or ``precision_rounding``,
    not both!

    Warning: ``float_is_zero(value1-value2)`` is not equivalent to
    ``float_compare(value1,value2) == 0``, as the former will round after
    computing the difference, while the latter will round before, giving
    different results for e.g. 0.006 and 0.002 at 2 digits precision.

    :param float value1: first value to compare
    :param float value2: second value to compare
    :param int precision_digits: number of fractional digits to round to.
    :param float precision_rounding: decimal number representing the minimum
        non-zero value at the desired precision (for example, 0.01 for a
        2-digit precision).
    :return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
        equal to, or greater than ``value2``, at the given precision.
    """
    step = _float_check_precision(precision_digits=precision_digits,
                                  precision_rounding=precision_rounding)
    first = float_round(value1, precision_rounding=step)
    second = float_round(value2, precision_rounding=step)
    delta = first - second
    if float_is_zero(delta, precision_rounding=step):
        return 0
    return -1 if delta < 0.0 else 1
|
||||
|
||||
def float_repr(value, precision_digits):
|
||||
"""Returns a string representation of a float with the
|
||||
given number of fractional digits. This should not be
|
||||
used to perform a rounding operation (this is done via
|
||||
:func:`~.float_round`), but only to produce a suitable
|
||||
string representation for a float.
|
||||
|
||||
:param float value:
|
||||
:param int precision_digits: number of fractional digits to include in the output
|
||||
"""
|
||||
# Can't use str() here because it seems to have an intrinsic
|
||||
# rounding to 12 significant digits, which causes a loss of
|
||||
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
|
||||
return ("%%.%sf" % precision_digits) % value
|
||||
|
||||
_float_repr = float_repr
|
||||
|
||||
def float_split_str(value, precision_digits):
    """Splits the given float 'value' in its unitary and decimal parts,
    returning each of them as a string, rounding the value using
    the provided ``precision_digits`` argument.

    The length of the string returned for decimal places will always
    be equal to ``precision_digits``, adding zeros at the end if needed.

    In case ``precision_digits`` is zero, an empty string is returned for
    the decimal places.

    Examples:
        1.432 with precision 2 => ('1', '43')
        1.49  with precision 1 => ('1', '5')
        1.1   with precision 3 => ('1', '100')
        1.12  with precision 0 => ('1', '')

    :param float value: value to split.
    :param int precision_digits: number of fractional digits to round to.
    :return: returns the tuple(<unitary part>, <decimal part>) of the given value
    :rtype: tuple(str, str)
    """
    rounded = float_round(value, precision_digits=precision_digits)
    text = float_repr(rounded, precision_digits)
    if not precision_digits:
        # No fractional digits were formatted: nothing to split on.
        return text, ''
    units, cents = text.split('.')
    return units, cents
|
||||
|
||||
def float_split(value, precision_digits):
    """ same as float_split_str() except that it returns the unitary and decimal
    parts as integers instead of strings. In case ``precision_digits`` is zero,
    0 is always returned as decimal part.

    :rtype: tuple(int, int)
    """
    units, cents = float_split_str(value, precision_digits)
    # An empty decimal part (precision_digits == 0) maps to 0.
    return (int(units), int(cents)) if cents else (int(units), 0)
|
||||
|
||||
def json_float_round(value, precision_digits, rounding_method='HALF-UP'):
    """Not suitable for float calculations! Similar to float_repr except that it
    returns a float suitable for json dump

    This may be necessary to produce "exact" representations of rounded float
    values during serialization, such as what is done by `json.dumps()`.
    Unfortunately `json.dumps` does not allow any form of custom float representation,
    nor any custom types, everything is serialized from the basic JSON types.

    :param int precision_digits: number of fractional digits to round to.
    :param rounding_method: the rounding method used: 'HALF-UP', 'UP' or 'DOWN',
        the first one rounding up to the closest number with the rule that
        number>=0.5 is rounded up to 1, the second always rounding up and the
        latest one always rounding down.
    :return: a rounded float value that must not be used for calculations, but
        is ready to be serialized in JSON with minimal chances of
        representation errors.
    """
    rounded = float_round(value, precision_digits=precision_digits, rounding_method=rounding_method)
    text = float_repr(rounded, precision_digits=precision_digits)
    # As of Python 3.1, repr() of a float is its shortest representation, so
    # float(text) yields a value whose repr is semantically identical to the
    # rounded string: e.g. '3.1750' becomes 3.175, never 3.174999999999322452.
    # Cfr. bpo-1580: https://bugs.python.org/issue1580
    return float(text)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Self-test / micro-benchmark: exercises float_round across magnitudes,
    # fractions and signs, comparing against expected string representations.

    import time
    start = time.time()
    count = 0   # number of rounding checks performed
    errors = 0  # number of mismatches against the expected representation

    def try_round(amount, expected, precision_digits=3):
        # Round `amount`, format it, and compare against `expected`.
        global count, errors; count += 1
        result = float_repr(float_round(amount, precision_digits=precision_digits),
                            precision_digits=precision_digits)
        if result != expected:
            errors += 1
            print('###!!! Rounding error: got %s , expected %s' % (result, expected))

    # Extended float range test, inspired by Cloves Almeida's test on bug #882036.
    # The three lists are zipped: fraction, its expected rendering, and the
    # precision used for that check.
    fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
    expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
    precisions = [2, 2, 2, 2, 2, 2, 3, 4]
    for magnitude in range(7):
        for frac, exp, prec in zip(fractions, expecteds, precisions):
            for sign in [-1,1]:
                for x in range(0, 10000, 97):
                    n = x * 10**magnitude
                    f = sign * (n + frac)
                    f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
                    try_round(f, f_exp, precision_digits=prec)

    stop = time.time()

    # Micro-bench results:
    # 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
    # with decimal:
    # 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
    print(count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs')
|
||||
266
odoo-bringout-oca-ocb-base/odoo/tools/func.py
Normal file
266
odoo-bringout-oca-ocb-base/odoo/tools/func.py
Normal file
|
|
@ -0,0 +1,266 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
__all__ = ['synchronized', 'lazy_classproperty', 'lazy_property',
|
||||
'classproperty', 'conditional', 'lazy']
|
||||
|
||||
import warnings
|
||||
from inspect import getsourcefile, Parameter, signature
|
||||
from functools import wraps
|
||||
from json import JSONEncoder
|
||||
|
||||
from decorator import decorator
|
||||
|
||||
class lazy_property(object):
    """ Decorator caching the result of a method as an instance attribute.

    The decorated method runs once on first access; its result is then
    stored on the instance under the same name, shadowing this descriptor.
    Delete the attribute on the object to force re-evaluation.
    """
    def __init__(self, fget):
        # Name mangling would break the setattr-based caching below.
        assert not fget.__name__.startswith('__'),\
            "lazy_property does not support mangled names"
        self.fget = fget

    def __get__(self, obj, cls):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        result = self.fget(obj)
        # Cache on the instance: later lookups bypass this descriptor.
        setattr(obj, self.fget.__name__, result)
        return result

    @property
    def __doc__(self):
        return self.fget.__doc__

    @staticmethod
    def reset_all(obj):
        """ Reset all lazy properties on the instance `obj`. """
        klass = type(obj)
        cached = vars(obj)
        for attr_name in list(cached):
            if isinstance(getattr(klass, attr_name, None), lazy_property):
                cached.pop(attr_name)
|
||||
|
||||
class lazy_classproperty(lazy_property):
    """ Similar to :class:`lazy_property`, but computed once per class. """
    def __get__(self, obj, cls):
        result = self.fget(cls)
        # Replace this descriptor on the class with the computed value.
        setattr(cls, self.fget.__name__, result)
        return result
|
||||
|
||||
def conditional(condition, decorator):
    """ Decorator for a conditionally applied decorator.

    When ``condition`` is false, the returned decorator is a no-op that
    hands the function back unchanged.

    Example::

        @conditional(get_config('use_cache'), ormcache)
        def fn():
            pass
    """
    return decorator if condition else (lambda fn: fn)
|
||||
|
||||
def filter_kwargs(func, kwargs):
    """ Return the subset of ``kwargs`` that binds to ``func``'s signature.

    Keys matching no keyword-bindable parameter are dropped; if the
    function accepts ``**kwargs``, everything binds and the original
    mapping is returned unchanged.
    """
    unmatched = set(kwargs)
    for param in signature(func).parameters.values():
        if param.kind == Parameter.VAR_KEYWORD:  # **kwargs
            unmatched.clear()
            break
        if param.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY):
            unmatched.discard(param.name)

    if not unmatched:
        return kwargs

    return {key: value for key, value in kwargs.items() if key not in unmatched}
|
||||
|
||||
def synchronized(lock_attr='_lock'):
    """ Method decorator factory: serialize calls to the decorated method
    under the context-manager lock stored on the instance attribute
    ``lock_attr``.
    """
    @decorator
    def locked(func, inst, *args, **kwargs):
        # The `decorator` library passes the wrapped callable first, then
        # the original call arguments; `inst` is the bound instance that
        # carries the lock.
        with getattr(inst, lock_attr):
            return func(inst, *args, **kwargs)
    return locked
# Shortcut: synchronize on the instance's `_lock` attribute.
locked = synchronized()
|
||||
|
||||
def frame_codeinfo(fframe, back=0):
    """ Return a (filename, line) pair for a previous frame .
    @return (filename, lineno) where lineno is either int or string==''
    """
    try:
        if not fframe:
            return "<unknown>", ''
        frame = fframe
        for _ in range(back):
            frame = frame.f_back
        try:
            filename = getsourcefile(frame)
        except TypeError:
            filename = '<builtin>'
        return filename, frame.f_lineno or ''
    except Exception:
        # Best-effort introspection helper: never let an error escape.
        return "<unknown>", ''
|
||||
|
||||
def compose(a, b):
    """ Composes the callables ``a`` and ``b``. ``compose(a, b)(*args)`` is
    equivalent to ``a(b(*args))``.

    Can be used as a decorator by partially applying ``a``::

        @partial(compose, a)
        def b():
            ...

    .. deprecated:: 16.0
    """
    warnings.warn(
        "Since 16.0, just byo or use a dedicated library like funcy.",
        DeprecationWarning,
        stacklevel=2,
    )

    @wraps(b)
    def composed(*args, **kwargs):
        return a(b(*args, **kwargs))

    return composed
|
||||
|
||||
|
||||
class _ClassProperty(property):
    # `fget` holds a classmethod; bind it to the owning class and call it,
    # so the property value is computed from the class, not the instance.
    def __get__(self, cls, owner):
        return self.fget.__get__(None, owner)()


def classproperty(func):
    """ Decorator turning ``func`` into a read-only property of its class. """
    return _ClassProperty(classmethod(func))
|
||||
|
||||
|
||||
class lazy(object):
|
||||
""" A proxy to the (memoized) result of a lazy evaluation:
|
||||
|
||||
.. code-block::
|
||||
|
||||
foo = lazy(func, arg) # func(arg) is not called yet
|
||||
bar = foo + 1 # eval func(arg) and add 1
|
||||
baz = foo + 2 # use result of func(arg) and add 2
|
||||
"""
|
||||
__slots__ = ['_func', '_args', '_kwargs', '_cached_value']
|
||||
|
||||
def __init__(self, func, *args, **kwargs):
|
||||
# bypass own __setattr__
|
||||
object.__setattr__(self, '_func', func)
|
||||
object.__setattr__(self, '_args', args)
|
||||
object.__setattr__(self, '_kwargs', kwargs)
|
||||
|
||||
@property
|
||||
def _value(self):
|
||||
if self._func is not None:
|
||||
value = self._func(*self._args, **self._kwargs)
|
||||
object.__setattr__(self, '_func', None)
|
||||
object.__setattr__(self, '_args', None)
|
||||
object.__setattr__(self, '_kwargs', None)
|
||||
object.__setattr__(self, '_cached_value', value)
|
||||
return self._cached_value
|
||||
|
||||
def __getattr__(self, name): return getattr(self._value, name)
|
||||
def __setattr__(self, name, value): return setattr(self._value, name, value)
|
||||
def __delattr__(self, name): return delattr(self._value, name)
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self._value) if self._func is None else object.__repr__(self)
|
||||
def __str__(self): return str(self._value)
|
||||
def __bytes__(self): return bytes(self._value)
|
||||
def __format__(self, format_spec): return format(self._value, format_spec)
|
||||
|
||||
def __lt__(self, other): return other > self._value
|
||||
def __le__(self, other): return other >= self._value
|
||||
def __eq__(self, other): return other == self._value
|
||||
def __ne__(self, other): return other != self._value
|
||||
def __gt__(self, other): return other < self._value
|
||||
def __ge__(self, other): return other <= self._value
|
||||
|
||||
def __hash__(self): return hash(self._value)
|
||||
def __bool__(self): return bool(self._value)
|
||||
|
||||
    # Calling the proxy calls the wrapped value.
    def __call__(self, *args, **kwargs): return self._value(*args, **kwargs)

    # Container protocol, forwarded to the wrapped value.
    def __len__(self): return len(self._value)
    def __getitem__(self, key): return self._value[key]
    def __missing__(self, key): return self._value.__missing__(key)
    def __setitem__(self, key, value): self._value[key] = value
    def __delitem__(self, key): del self._value[key]
    def __iter__(self): return iter(self._value)
    def __reversed__(self): return reversed(self._value)
    def __contains__(self, key): return key in self._value

    # Binary arithmetic and bitwise operators, forwarded to the wrapped value.
    def __add__(self, other): return self._value.__add__(other)
    def __sub__(self, other): return self._value.__sub__(other)
    def __mul__(self, other): return self._value.__mul__(other)
    def __matmul__(self, other): return self._value.__matmul__(other)
    def __truediv__(self, other): return self._value.__truediv__(other)
    def __floordiv__(self, other): return self._value.__floordiv__(other)
    def __mod__(self, other): return self._value.__mod__(other)
    def __divmod__(self, other): return self._value.__divmod__(other)
    def __pow__(self, other): return self._value.__pow__(other)
    def __lshift__(self, other): return self._value.__lshift__(other)
    def __rshift__(self, other): return self._value.__rshift__(other)
    def __and__(self, other): return self._value.__and__(other)
    def __xor__(self, other): return self._value.__xor__(other)
    def __or__(self, other): return self._value.__or__(other)

    # Reflected (right-hand side) operators.
    def __radd__(self, other): return self._value.__radd__(other)
    def __rsub__(self, other): return self._value.__rsub__(other)
    def __rmul__(self, other): return self._value.__rmul__(other)
    def __rmatmul__(self, other): return self._value.__rmatmul__(other)
    def __rtruediv__(self, other): return self._value.__rtruediv__(other)
    def __rfloordiv__(self, other): return self._value.__rfloordiv__(other)
    def __rmod__(self, other): return self._value.__rmod__(other)
    def __rdivmod__(self, other): return self._value.__rdivmod__(other)
    def __rpow__(self, other): return self._value.__rpow__(other)
    def __rlshift__(self, other): return self._value.__rlshift__(other)
    def __rrshift__(self, other): return self._value.__rrshift__(other)
    def __rand__(self, other): return self._value.__rand__(other)
    def __rxor__(self, other): return self._value.__rxor__(other)
    def __ror__(self, other): return self._value.__ror__(other)

    # In-place operators (the proxy itself is not mutated; the result of the
    # wrapped value's in-place method is returned and rebinds the name).
    def __iadd__(self, other): return self._value.__iadd__(other)
    def __isub__(self, other): return self._value.__isub__(other)
    def __imul__(self, other): return self._value.__imul__(other)
    def __imatmul__(self, other): return self._value.__imatmul__(other)
    def __itruediv__(self, other): return self._value.__itruediv__(other)
    def __ifloordiv__(self, other): return self._value.__ifloordiv__(other)
    def __imod__(self, other): return self._value.__imod__(other)
    def __ipow__(self, other): return self._value.__ipow__(other)
    def __ilshift__(self, other): return self._value.__ilshift__(other)
    def __irshift__(self, other): return self._value.__irshift__(other)
    def __iand__(self, other): return self._value.__iand__(other)
    def __ixor__(self, other): return self._value.__ixor__(other)
    def __ior__(self, other): return self._value.__ior__(other)
|
||||
|
||||
    # Unary operators.
    def __neg__(self): return self._value.__neg__()
    def __pos__(self): return self._value.__pos__()
    def __abs__(self): return self._value.__abs__()
    def __invert__(self): return self._value.__invert__()

    # Numeric conversions.
    def __complex__(self): return complex(self._value)
    def __int__(self): return int(self._value)
    def __float__(self): return float(self._value)

    # Integer coercion (slicing, hex(), bin(), ...).
    def __index__(self): return self._value.__index__()

    # Rounding protocol (round(), math.trunc/floor/ceil).
    def __round__(self): return self._value.__round__()
    def __trunc__(self): return self._value.__trunc__()
    def __floor__(self): return self._value.__floor__()
    def __ceil__(self): return self._value.__ceil__()

    # Context manager protocol.
    def __enter__(self): return self._value.__enter__()
    def __exit__(self, exc_type, exc_value, traceback):
        return self._value.__exit__(exc_type, exc_value, traceback)

    # Awaitable, async iteration and async context manager protocols.
    def __await__(self): return self._value.__await__()
    def __aiter__(self): return self._value.__aiter__()
    def __anext__(self): return self._value.__anext__()
    def __aenter__(self): return self._value.__aenter__()
    def __aexit__(self, exc_type, exc_value, traceback):
        return self._value.__aexit__(exc_type, exc_value, traceback)
|
||||
66
odoo-bringout-oca-ocb-base/odoo/tools/geoipresolver.py
Normal file
66
odoo-bringout-oca-ocb-base/odoo/tools/geoipresolver.py
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
import os.path
|
||||
|
||||
try:
|
||||
import GeoIP # Legacy
|
||||
except ImportError:
|
||||
GeoIP = None
|
||||
|
||||
try:
|
||||
import geoip2
|
||||
import geoip2.database
|
||||
except ImportError:
|
||||
geoip2 = None
|
||||
|
||||
class GeoIPResolver(object):
    """Wrapper hiding the API differences between the legacy ``GeoIP``
    library (version 1) and ``geoip2`` (version 2) behind one interface.
    """

    def __init__(self, fname):
        """Open the GeoIP database located at ``fname``.

        Tries geoip2 first, then falls back to the legacy GeoIP library.

        :param str fname: path to the database file
        :raise ValueError: if neither library can read the file
        """
        self.fname = fname
        try:
            self._db = geoip2.database.Reader(fname)
            self.version = 2
        except Exception:
            try:
                self._db = GeoIP.open(fname, GeoIP.GEOIP_STANDARD)
                self.version = 1
                assert self._db.database_info is not None
            except Exception:
                raise ValueError('Invalid GeoIP database: %r' % fname)

    def __del__(self):
        # BUGFIX: __init__ may raise ValueError before `version` is set, and
        # the interpreter still calls __del__ on the half-built instance;
        # use getattr to avoid a spurious AttributeError during collection.
        if getattr(self, 'version', None) == 2:
            self._db.close()

    @classmethod
    def open(cls, fname):
        """Return a resolver for ``fname``, or None when no GeoIP library
        is installed or the file does not exist."""
        if not GeoIP and not geoip2:
            return None
        if not os.path.exists(fname):
            return None
        return GeoIPResolver(fname)

    def resolve(self, ip):
        """Resolve ``ip`` to a dict of geographical data.

        :param str ip: the IP address to look up
        :return: a dict in the legacy GeoIP record format, empty when the
            address cannot be located
        :rtype: dict
        """
        if self.version == 1:
            return self._db.record_by_addr(ip) or {}
        elif self.version == 2:
            try:
                r = self._db.city(ip)
            except (ValueError, geoip2.errors.AddressNotFoundError):
                return {}
            # Compatibility with Legacy database.
            # Some ips cannot be located to a specific country. Legacy DB used to locate them in
            # continent instead of country. Do the same to not change behavior of existing code.
            country, attr = (r.country, 'iso_code') if r.country.geoname_id else (r.continent, 'code')
            return {
                'city': r.city.name,
                'country_code': getattr(country, attr),
                'country_name': country.name,
                'latitude': r.location.latitude,
                'longitude': r.location.longitude,
                'region': r.subdivisions[0].iso_code if r.subdivisions else None,
                'time_zone': r.location.time_zone,
            }

    # compat: keep the legacy GeoIP method name working on both versions
    def record_by_addr(self, addr):
        return self.resolve(addr)
|
||||
516
odoo-bringout-oca-ocb-base/odoo/tools/image.py
Normal file
516
odoo-bringout-oca-ocb-base/odoo/tools/image.py
Normal file
|
|
@ -0,0 +1,516 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
import base64
|
||||
import binascii
|
||||
import io
|
||||
|
||||
from PIL import Image, ImageOps
|
||||
# We can preload Ico too because it is considered safe
|
||||
from PIL import IcoImagePlugin
|
||||
try:
|
||||
from PIL.Image import Transpose, Palette, Resampling
|
||||
except ImportError:
|
||||
Transpose = Palette = Resampling = Image
|
||||
|
||||
from random import randrange
|
||||
|
||||
from odoo.exceptions import UserError
|
||||
from odoo.tools.translate import _
|
||||
|
||||
|
||||
# Preload PIL with the minimal subset of image formats we need
|
||||
Image.preinit()
|
||||
Image._initialized = 2
|
||||
|
||||
# Maps only the 6 first bits of the base64 data, accurate enough
|
||||
# for our purpose and faster than decoding the full blob first
|
||||
FILETYPE_BASE64_MAGICWORD = {
|
||||
b'/': 'jpg',
|
||||
b'R': 'gif',
|
||||
b'i': 'png',
|
||||
b'P': 'svg+xml',
|
||||
}
|
||||
|
||||
EXIF_TAG_ORIENTATION = 0x112
|
||||
# The target is to have 1st row/col to be top/left
|
||||
# Note: rotate is counterclockwise
|
||||
EXIF_TAG_ORIENTATION_TO_TRANSPOSE_METHODS = { # Initial side on 1st row/col:
|
||||
0: [], # reserved
|
||||
1: [], # top/left
|
||||
2: [Transpose.FLIP_LEFT_RIGHT], # top/right
|
||||
3: [Transpose.ROTATE_180], # bottom/right
|
||||
4: [Transpose.FLIP_TOP_BOTTOM], # bottom/left
|
||||
5: [Transpose.FLIP_LEFT_RIGHT, Transpose.ROTATE_90],# left/top
|
||||
6: [Transpose.ROTATE_270], # right/top
|
||||
7: [Transpose.FLIP_TOP_BOTTOM, Transpose.ROTATE_90],# right/bottom
|
||||
8: [Transpose.ROTATE_90], # left/bottom
|
||||
}
|
||||
|
||||
# Arbitrary limit to fit most resolutions, including Samsung Galaxy A22 photo,
|
||||
# 8K with a ratio up to 16:10, and almost all variants of 4320p
|
||||
IMAGE_MAX_RESOLUTION = 50e6
|
||||
|
||||
|
||||
class ImageProcess():
    """Chainable image-processing pipeline.

    Operations (``resize``, ``crop_resize``, ``colorize``) are applied to the
    in-memory PIL image; ``image_quality`` serializes the final result. The
    ``operationsCount`` counter tracks whether anything actually changed so
    the untouched original binary can be returned as-is.
    """

    def __init__(self, source, verify_resolution=True):
        """Initialize the `source` image for processing.

        :param source: the original image binary

            No processing will be done if the `source` is falsy or if
            the image is SVG.

        :param verify_resolution: if True, make sure the original image size is not
            excessive before starting to process it. The max allowed resolution is
            defined by `IMAGE_MAX_RESOLUTION`.
        :type verify_resolution: bool
        :rtype: ImageProcess

        :raise: UserError if `verify_resolution` is True and the image is too large
        :raise: UserError if the image can't be identified by PIL
        """
        self.source = source or False
        self.operationsCount = 0

        if not source or source[:1] == b'<':
            # don't process empty source or SVG
            self.image = False
        else:
            try:
                self.image = Image.open(io.BytesIO(source))
            except (OSError, binascii.Error):
                raise UserError(_("This file could not be decoded as an image file."))

            # Original format has to be saved before fixing the orientation or
            # doing any other operations because the information will be lost on
            # the resulting image.
            self.original_format = (self.image.format or '').upper()

            self.image = image_fix_orientation(self.image)

            w, h = self.image.size
            if verify_resolution and w * h > IMAGE_MAX_RESOLUTION:
                raise UserError(_("Too large image (above %sMpx), reduce the image size.", str(IMAGE_MAX_RESOLUTION / 1e6)))

    def image_quality(self, quality=0, output_format=''):
        """Return the image resulting of all the image processing
        operations that have been applied previously.

        Return False if the initialized `image` was falsy, and return
        the initialized `image` without change if it was SVG.

        Also return the initialized `image` if no operations have been applied
        and the `output_format` is the same as the original format and the
        quality is not specified.

        :param int quality: quality setting to apply. Default to 0.

            - for JPEG: 1 is worse, 95 is best. Values above 95 should be
              avoided. Falsy values will fallback to 95, but only if the image
              was changed, otherwise the original image is returned.
            - for PNG: set falsy to prevent conversion to a WEB palette.
            - for other formats: no effect.
        :param str output_format: the output format. Can be PNG, JPEG, GIF, or ICO.
            Default to the format of the original image. BMP is converted to
            PNG, other formats than those mentioned above are converted to JPEG.
        :return: image
        :rtype: bytes or False
        """
        if not self.image:
            return self.source

        output_image = self.image

        output_format = output_format.upper() or self.original_format
        if output_format == 'BMP':
            output_format = 'PNG'
        elif output_format not in ['PNG', 'JPEG', 'GIF', 'ICO']:
            output_format = 'JPEG'

        # Nothing changed and no re-encoding requested: hand back the
        # original binary untouched (cheapest possible path).
        if not self.operationsCount and output_format == self.original_format and not quality:
            return self.source

        opt = {'output_format': output_format}

        if output_format == 'PNG':
            opt['optimize'] = True
            if quality:
                if output_image.mode != 'P':
                    # Floyd Steinberg dithering by default
                    output_image = output_image.convert('RGBA').convert('P', palette=Palette.WEB, colors=256)
        if output_format == 'JPEG':
            opt['optimize'] = True
            opt['quality'] = quality or 95
        if output_format == 'GIF':
            opt['optimize'] = True
            opt['save_all'] = True

        # JPEG has no alpha channel and exotic modes cannot be saved as-is.
        if output_image.mode not in ["1", "L", "P", "RGB", "RGBA"] or (output_format == 'JPEG' and output_image.mode == 'RGBA'):
            output_image = output_image.convert("RGB")

        return image_apply_opt(output_image, **opt)

    def resize(self, max_width=0, max_height=0):
        """Resize the image.

        The image is never resized above the current image size. This method is
        only to create a smaller version of the image.

        The current ratio is preserved. To change the ratio, see `crop_resize`.

        If `max_width` or `max_height` is falsy, it will be computed from the
        other to keep the current ratio. If both are falsy, no resize is done.

        It is currently not supported for GIF because we do not handle all the
        frames properly.

        :param int max_width: max width
        :param int max_height: max height
        :return: self to allow chaining
        :rtype: ImageProcess
        """
        if self.image and self.original_format != 'GIF' and (max_width or max_height):
            w, h = self.image.size
            asked_width = max_width or (w * max_height) // h
            asked_height = max_height or (h * max_width) // w
            if asked_width != w or asked_height != h:
                self.image.thumbnail((asked_width, asked_height), Resampling.LANCZOS)
                # thumbnail() never upscales, so check whether a change
                # actually happened before counting the operation.
                if self.image.width != w or self.image.height != h:
                    self.operationsCount += 1
        return self

    def crop_resize(self, max_width, max_height, center_x=0.5, center_y=0.5):
        """Crop and resize the image.

        The image is never resized above the current image size. This method is
        only to create smaller versions of the image.

        Instead of preserving the ratio of the original image like `resize`,
        this method will force the output to take the ratio of the given
        `max_width` and `max_height`, so both have to be defined.

        The crop is done before the resize in order to preserve as much of the
        original image as possible. The goal of this method is primarily to
        resize to a given ratio, and it is not to crop unwanted parts of the
        original image. If the latter is what you want to do, you should create
        another method, or directly use the `crop` method from PIL.

        It is currently not supported for GIF because we do not handle all the
        frames properly.

        :param int max_width: max width
        :param int max_height: max height
        :param float center_x: the center of the crop between 0 (left) and 1
            (right). Defaults to 0.5 (center).
        :param float center_y: the center of the crop between 0 (top) and 1
            (bottom). Defaults to 0.5 (center).
        :return: self to allow chaining
        :rtype: ImageProcess
        """
        if self.image and self.original_format != 'GIF' and max_width and max_height:
            w, h = self.image.size
            # We want to keep as much of the image as possible -> at least one
            # of the 2 crop dimensions always has to be the same value as the
            # original image.
            # The target size will be reached with the final resize.
            if w / max_width > h / max_height:
                new_w, new_h = w, (max_height * w) // max_width
            else:
                new_w, new_h = (max_width * h) // max_height, h

            # No cropping above image size.
            if new_w > w:
                new_w, new_h = w, (new_h * w) // new_w
            if new_h > h:
                new_w, new_h = (new_w * h) // new_h, h

            # Dimensions should be at least 1.
            new_w, new_h = max(new_w, 1), max(new_h, 1)

            # Correctly place the center of the crop.
            x_offset = int((w - new_w) * center_x)
            h_offset = int((h - new_h) * center_y)

            if new_w != w or new_h != h:
                self.image = self.image.crop((x_offset, h_offset, x_offset + new_w, h_offset + new_h))
                if self.image.width != w or self.image.height != h:
                    self.operationsCount += 1

        return self.resize(max_width, max_height)

    def colorize(self):
        """Replace the transparent background by a random color.

        :return: self to allow chaining
        :rtype: ImageProcess
        """
        if self.image:
            original = self.image
            # random but muted RGB color (channels in [32, 224) step 24)
            color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))
            self.image = Image.new('RGB', original.size)
            self.image.paste(color, box=(0, 0) + original.size)
            # paste with mask=original keeps the opaque pixels on top
            self.image.paste(original, mask=original)
            self.operationsCount += 1
        return self
|
||||
|
||||
|
||||
def image_process(source, size=(0, 0), verify_resolution=False, quality=0, crop=None, colorize=False, output_format=''):
    """Process the `source` image by executing the given operations and
    return the result image.

    Returns `source` untouched when it is falsy or when no operation at
    all was requested (cheap fast path, no decoding done).
    """
    no_size = not size or (not size[0] and not size[1])
    no_other_op = not (verify_resolution or quality or crop or colorize or output_format)
    if not source or (no_size and no_other_op):
        # for performance: don't do anything if the image is falsy or if
        # no operations have been requested
        return source

    image = ImageProcess(source, verify_resolution)
    if size:
        if crop:
            # map the crop keyword onto the vertical focus of the crop;
            # anything other than 'top'/'bottom' keeps the center
            vertical_centers = {'top': 0, 'bottom': 1}
            image.crop_resize(
                max_width=size[0],
                max_height=size[1],
                center_x=0.5,
                center_y=vertical_centers.get(crop, 0.5),
            )
        else:
            image.resize(max_width=size[0], max_height=size[1])
    if colorize:
        image.colorize()
    return image.image_quality(quality=quality, output_format=output_format)
|
||||
|
||||
|
||||
# ----------------------------------------
|
||||
# Misc image tools
|
||||
# ---------------------------------------
|
||||
|
||||
def average_dominant_color(colors, mitigate=175, max_margin=140):
    """Compute the dominant color of a color histogram.

    Steps: pick the most frequent color, derive a tolerance margin from its
    prevalence, split the remaining colors into "close to dominant" and
    "remaining", average the close group per band, then cap the brightness
    at `mitigate`.

    :param colors: list of (count, (R, G, B, A)) tuples, e.g. the output of
        :meth:`~PIL.Image.Image.getcolors`. NOTE: the dominant entry is
        removed from this list in place (same side effect as always).
    :param mitigate: maximum value a band can reach
    :param max_margin: maximum difference from one of the dominant values
    :returns: tuple(average dominant color as (R, G, B), remaining colors)
    """
    dominant = max(colors)
    dominant_rgb = dominant[1][:3]

    # the margin shrinks as the dominant color becomes more prevalent
    total_count = sum(count for count, _rgb in colors)
    margin = max_margin * (1 - dominant[0] / total_count)

    colors.remove(dominant)

    dominant_set = [dominant]
    remaining = []
    for entry in colors:
        rgb = entry[1]
        is_close = all(
            dominant_rgb[band] - margin < rgb[band] < dominant_rgb[band] + margin
            for band in range(3)
        )
        (dominant_set if is_close else remaining).append(entry)

    # weighted per-band average over the dominant set
    dominant_avg = []
    for band in range(3):
        weighted = sum(count * rgba[band] for count, rgba in dominant_set)
        total = sum(count for count, _rgba in dominant_set)
        dominant_avg.append(int(weighted / total))

    # mitigate: scale all bands down so the brightest one caps at `mitigate`
    brightest = max(dominant_avg)
    scale = (brightest / mitigate) if brightest > mitigate else 1
    final_dominant = tuple(int(band / scale) for band in dominant_avg)

    return final_dominant, remaining
|
||||
|
||||
|
||||
def image_fix_orientation(image):
    """Fix the orientation of the image if it has an EXIF orientation tag.

    This typically happens for images taken from a non-standard orientation
    by some phones or other devices that are able to report orientation.

    The transpositions listed in EXIF_TAG_ORIENTATION_TO_TRANSPOSE_METHODS
    are applied so the first row of pixels ends up at the top and the first
    column at the left. EXIF tags are not kept when the image is later
    saved, so the transposition has to be baked into the pixels here.

    Note: the resulting image keeps its (now stale) orientation tag; since
    the tag is not used anywhere in this code, removing it is not worth the
    extra complexity.

    :param image: the source image
    :type image: ~PIL.Image.Image
    :return: a transposed copy of the source, or the source itself when no
        transposition was needed
    :rtype: ~PIL.Image.Image
    """
    # PIL < 6.0 only exposed the private _getexif(); prefer the public API
    exif_reader = getattr(image, 'getexif', None) or getattr(image, '_getexif', None)
    if not exif_reader:
        return image
    exif = exif_reader()
    if not exif:
        return image
    orientation = exif.get(EXIF_TAG_ORIENTATION, 0)
    for method in EXIF_TAG_ORIENTATION_TO_TRANSPOSE_METHODS.get(orientation, []):
        image = image.transpose(method)
    return image
|
||||
|
||||
|
||||
def binary_to_image(source):
    """Return a PIL image parsed from the raw binary `source`.

    :param bytes source: the raw image data
    :rtype: ~PIL.Image.Image
    :raise: UserError when PIL cannot identify the data as an image
    """
    buffer = io.BytesIO(source)
    try:
        return Image.open(buffer)
    except (OSError, binascii.Error):
        raise UserError(_("This file could not be decoded as an image file."))
|
||||
|
||||
def base64_to_image(base64_source):
    """Return a PIL image from the given `base64_source`.

    :param base64_source: the image base64 encoded
    :type base64_source: string or bytes
    :rtype: ~PIL.Image.Image
    :raise: UserError if the base64 is incorrect or the image can't be identified by PIL
    """
    try:
        # both the base64 decoding and the image parsing may fail
        decoded = base64.b64decode(base64_source)
        return Image.open(io.BytesIO(decoded))
    except (OSError, binascii.Error):
        raise UserError(_("This file could not be decoded as an image file."))
|
||||
|
||||
|
||||
def image_apply_opt(image, output_format, **params):
    """Serialize the given PIL `image` to bytes using `params`.

    :type image: ~PIL.Image.Image
    :param str output_format: :meth:`~PIL.Image.Image.save`'s ``format`` parameter
    :param dict params: params to expand when calling :meth:`~PIL.Image.Image.save`
    :return: the image formatted
    :rtype: bytes
    """
    # JPEG cannot encode alpha/palette modes: normalize to RGB first
    if output_format == 'JPEG' and image.mode not in ['1', 'L', 'RGB']:
        image = image.convert("RGB")
    buffer = io.BytesIO()
    image.save(buffer, format=output_format, **params)
    return buffer.getvalue()
|
||||
|
||||
|
||||
def image_to_base64(image, output_format, **params):
    """Return a base64_image from the given PIL `image` using `params`.

    :type image: ~PIL.Image.Image
    :param str output_format:
    :param dict params: params to expand when calling :meth:`~PIL.Image.Image.save`
    :return: the image base64 encoded
    :rtype: bytes
    """
    raw = image_apply_opt(image, output_format, **params)
    return base64.b64encode(raw)
|
||||
|
||||
|
||||
def is_image_size_above(base64_source_1, base64_source_2):
    """Return whether the image `base64_source_1` is strictly larger
    (in width or in height, after orientation fix) than `base64_source_2`.

    Returns False when either source is falsy or is an SVG document.
    """
    if not base64_source_1 or not base64_source_2:
        return False
    # 'P' is the first base64 character of '<' (see FILETYPE_BASE64_MAGICWORD):
    # sizes of SVG documents are not comparable
    svg_markers = (b'P', 'P')
    if base64_source_1[:1] in svg_markers or base64_source_2[:1] in svg_markers:
        return False
    first = image_fix_orientation(base64_to_image(base64_source_1))
    second = image_fix_orientation(base64_to_image(base64_source_2))
    return first.width > second.width or first.height > second.height
|
||||
|
||||
|
||||
def image_guess_size_from_field_name(field_name):
    """Attempt to guess the image size based on `field_name`.

    If it can't be guessed or if it is a custom field: return (0, 0) instead.

    :param str field_name: the name of a field
    :return: the guessed size
    :rtype: tuple (width, height)
    """
    if field_name == 'image':
        return (1024, 1024)
    if field_name.startswith('x_'):
        # custom field: no naming convention to rely on
        return (0, 0)
    try:
        suffix = int(field_name.rsplit('_', 1)[-1])
    except ValueError:
        return (0, 0)
    # a suffix below 16 is probably not a size
    return (suffix, suffix) if suffix >= 16 else (0, 0)
|
||||
|
||||
|
||||
def image_data_uri(base64_source):
    """This returns data URL scheme according RFC 2397
    (https://tools.ietf.org/html/rfc2397) for all kind of supported images
    (PNG, GIF, JPG and SVG), defaulting on PNG type if not mimetype detected.

    :param bytes base64_source: the base64-encoded image payload
    :rtype: str
    """
    # The first base64 character is enough to discriminate between the
    # supported formats (see FILETYPE_BASE64_MAGICWORD above).
    return 'data:image/%s;base64,%s' % (
        FILETYPE_BASE64_MAGICWORD.get(base64_source[:1], 'png'),
        base64_source.decode(),
    )
|
||||
|
||||
|
||||
def get_saturation(rgb):
    """Return the saturation (HSL format) of a given RGB color.

    :param rgb: rgb tuple or list (channels in 0..255)
    :return: saturation in [0, 1]
    """
    high = max(rgb) / 255
    low = min(rgb) / 255
    delta = high - low
    if delta == 0:
        # achromatic color: saturation is zero by definition
        return 0
    return delta / (1 - abs(high + low - 1))
|
||||
|
||||
|
||||
def get_lightness(rgb):
    """Return the lightness (HSL format) of a given RGB color.

    :param rgb: rgb tuple or list (channels in 0..255)
    :return: lightness in [0, 1]
    """
    lightest = max(rgb)
    darkest = min(rgb)
    return (lightest + darkest) / 2 / 255
|
||||
|
||||
|
||||
def hex_to_rgb(hx):
    """Converts an hexadecimal string (starting with '#') to a RGB tuple"""
    # characters 1-2, 3-4 and 5-6 are the R, G and B hex pairs
    return tuple(int(hx[pos:pos + 2], 16) for pos in (1, 3, 5))
|
||||
|
||||
|
||||
def rgb_to_hex(rgb):
    """Converts a RGB tuple or list to an hexadecimal string"""
    return '#' + ''.join(format(channel, '02x') for channel in rgb)
|
||||
726
odoo-bringout-oca-ocb-base/odoo/tools/js_transpiler.py
Normal file
726
odoo-bringout-oca-ocb-base/odoo/tools/js_transpiler.py
Normal file
|
|
@ -0,0 +1,726 @@
|
|||
"""
|
||||
This code is what let us use ES6-style modules in odoo.
|
||||
Classic Odoo modules are composed of a top-level :samp:`odoo.define({name},{body_function})` call.
|
||||
This processor will take files starting with an `@odoo-module` annotation (in a comment) and convert them to classic modules.
|
||||
If any file has the ``/** odoo-module */`` on top of it, it will get processed by this class.
|
||||
It performs several operations to get from ES6 syntax to the usual odoo one with minimal changes.
|
||||
This is done on the fly, this not a pre-processing tool.
|
||||
|
||||
Caveat: This is done without a full parser, only using regex. One can only expect to cover as much edge cases
|
||||
as possible with reasonable limitations. Also, this only changes imports and exports, so all JS features used in
|
||||
the original source need to be supported by the browsers.
|
||||
"""
|
||||
|
||||
import re
|
||||
import logging
|
||||
from functools import partial
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
def transpile_javascript(url, content):
    """
    Transpile the code from native JS modules to custom odoo modules.

    :param content: The original source code
    :param url: The url of the file in the project
    :return: The transpiled source code
    """
    module_path = url_to_module_path(url)
    # legacy alias (odoo.define with the old module name), appended last
    legacy_odoo_define = get_aliased_odoo_define_content(module_path, content)

    # The order of the operations does sometimes matter.
    steps = [
        convert_legacy_default_import,
        convert_basic_import,
        convert_default_and_named_import,
        convert_default_and_star_import,
        convert_default_import,
        convert_star_import,
        convert_unnamed_relative_import,
        convert_from_export,
        convert_star_from_export,
        partial(convert_relative_require, url),
        remove_index,
        convert_export_function,
        convert_export_class,
        convert_variable_export,
        convert_object_export,
        convert_default_export,
        # must run last: wraps everything transformed above
        partial(wrap_with_odoo_define, module_path),
    ]
    for s in steps:
        content = s(content)
    if legacy_odoo_define:
        content += legacy_odoo_define
    return content
|
||||
|
||||
|
||||
URL_RE = re.compile(r"""
    /?(?P<module>\S+)       # /module name
    /([\S/]*/)?static/      # ... /static/
    (?P<type>src|tests|lib) # src, tests, or lib file
    (?P<url>/[\S/]*)        # URL (/...)
""", re.VERBOSE)


def url_to_module_path(url):
    """
    Odoo modules each have a name. (odoo.define("<the name>", async function (require) {...});
    It is used in to be required later. (const { something } = require("<the name>").
    The transpiler transforms the url of the file in the project to this name.
    It takes the module name and add a @ on the start of it, and map it to be the source of the static/src (or
    static/tests, or static/lib) folder in that module.

    in: web/static/src/one/two/three.js
    out: @web/one/two/three.js
    The module would therefore be defined and required by this path.

    :param url: an url in the project
    :return: a special path starting with @<module-name>.
    :raise ValueError: when the url does not point inside a static/src,
        static/tests or static/lib folder
    """
    match = URL_RE.match(url)
    if not match:
        # BUGFIX: the message used to advertise '/static/test' although the
        # regex only accepts '/static/tests' (plural).
        raise ValueError("The js file %r must be in the folder '/static/src' or '/static/lib' or '/static/tests'" % url)

    url = match["url"]
    # '/index(.js)' is implicit: requiring the folder resolves to it
    if url.endswith(('/index.js', '/index')):
        url, _ = url.rsplit('/', 1)
    # the '.js' extension is not part of the module name
    if url.endswith('.js'):
        url = url[:-3]
    if match["type"] == "src":
        return "@%s%s" % (match['module'], url)
    elif match["type"] == "lib":
        return "@%s/../lib%s" % (match['module'], url)
    else:
        return "@%s/../tests%s" % (match['module'], url)
|
||||
|
||||
|
||||
def wrap_with_odoo_define(module_path, content):
    """
    Wraps the current content (source code) with the odoo.define call.
    Should logically be called once all other operations have been performed.

    :param str module_path: the '@module/...' name to define the module as
    :param str content: the already-transpiled module body
    :return: the complete module source returning ``__exports``
    """
    header = f"odoo.define({module_path!r}, async function (require) {{\n'use strict';\nlet __exports = {{}};\n"
    footer = "\nreturn __exports;\n});\n"
    return header + content + footer
|
||||
|
||||
|
||||
EXPORT_FCT_RE = re.compile(r"""
    ^
    (?P<space>\s*)                   # space and empty line
    export\s+                        # export
    (?P<type>(async\s+)?function)\s+ # async function or function
    (?P<identifier>\w+)              # name the function
""", re.MULTILINE | re.VERBOSE)


def convert_export_function(content):
    """
    Transpile functions that are being exported.

    .. code-block:: javascript

        // before
        export function name
        // after
        __exports.name = name; function name

        // before
        export async function name
        // after
        __exports.name = name; async function name

    """
    return EXPORT_FCT_RE.sub(
        r"\g<space>__exports.\g<identifier> = \g<identifier>; \g<type> \g<identifier>",
        content,
    )
|
||||
|
||||
EXPORT_CLASS_RE = re.compile(r"""
    ^
    (?P<space>\s*)            # space and empty line
    export\s+                 # export
    (?P<type>class)\s+        # class
    (?P<identifier>\w+)       # name of the class
""", re.MULTILINE | re.VERBOSE)


def convert_export_class(content):
    """
    Transpile exported (non-default) class declarations.

    .. code-block:: javascript

        // before
        export class name
        // after
        const name = __exports.name = class name
    """
    def _rewrite(match):
        name = match["identifier"]
        return "%sconst %s = __exports.%s = %s %s" % (
            match["space"], name, name, match["type"], name)
    return EXPORT_CLASS_RE.sub(_rewrite, content)
|
||||
|
||||
|
||||
EXPORT_FCT_DEFAULT_RE = re.compile(r"""
    ^
    (?P<space>\s*)                        # space and empty line
    export\s+default\s+                   # export default
    (?P<type>(async\s+)?function)\s+      # async function or function
    (?P<identifier>\w+)                   # name of the function
""", re.MULTILINE | re.VERBOSE)


def convert_export_function_default(content):
    """
    Transpile function declarations exported as the default value.

    .. code-block:: javascript

        // before
        export default function name
        // after
        __exports[Symbol.for("default")] = name; function name

        // before
        export default async function name
        // after
        __exports[Symbol.for("default")] = name; async function name
    """
    def _rewrite(match):
        name = match["identifier"]
        return '%s__exports[Symbol.for("default")] = %s; %s %s' % (
            match["space"], name, match["type"], name)
    return EXPORT_FCT_DEFAULT_RE.sub(_rewrite, content)
|
||||
|
||||
EXPORT_CLASS_DEFAULT_RE = re.compile(r"""
    ^
    (?P<space>\s*)            # space and empty line
    export\s+default\s+       # export default
    (?P<type>class)\s+        # class
    (?P<identifier>\w+)       # name of the class or the function
""", re.MULTILINE | re.VERBOSE)


def convert_export_class_default(content):
    """
    Transpile class declarations exported as the default value.

    .. code-block:: javascript

        // before
        export default class name
        // after
        const name = __exports[Symbol.for("default")] = class name
    """
    def _rewrite(match):
        name = match["identifier"]
        return '%sconst %s = __exports[Symbol.for("default")] = %s %s' % (
            match["space"], name, match["type"], name)
    return EXPORT_CLASS_DEFAULT_RE.sub(_rewrite, content)
|
||||
|
||||
EXPORT_VAR_RE = re.compile(r"""
    ^
    (?P<space>\s*)                # space and empty line
    export\s+                     # export
    (?P<type>let|const|var)\s+    # let or const or var
    (?P<identifier>\w+)           # variable name
""", re.MULTILINE | re.VERBOSE)


def convert_variable_export(content):
    """
    Transpile exported (non-default) variable declarations.

    .. code-block:: javascript

        // before
        export let name
        // after
        let name = __exports.name
        // (same with var and const)
    """
    def _rewrite(match):
        name = match["identifier"]
        return "%s%s %s = __exports.%s" % (match["space"], match["type"], name, name)
    return EXPORT_VAR_RE.sub(_rewrite, content)
|
||||
|
||||
|
||||
EXPORT_DEFAULT_VAR_RE = re.compile(r"""
    ^
    (?P<space>\s*)                # space and empty line
    export\s+default\s+           # export default
    (?P<type>let|const|var)\s+    # let or const or var
    (?P<identifier>\w+)\s*        # variable name
""", re.MULTILINE | re.VERBOSE)


def convert_variable_export_default(content):
    """
    Transpile variable declarations exported as the default value.

    .. code-block:: javascript

        // before
        export default let name
        // after
        let name = __exports[Symbol.for("default")]
    """
    def _rewrite(match):
        return '%s%s %s = __exports[Symbol.for("default")]' % (
            match["space"], match["type"], match["identifier"])
    return EXPORT_DEFAULT_VAR_RE.sub(_rewrite, content)
|
||||
|
||||
|
||||
EXPORT_OBJECT_RE = re.compile(r"""
    ^
    (?P<space>\s*)            # space and empty line
    export\s*                 # export
    (?P<object>{[\w\s,]+})    # { a, b, c as x, ... }
""", re.MULTILINE | re.VERBOSE)


def convert_object_export(content):
    """
    Transpile exports of multiple elements at once.

    .. code-block:: javascript

        // before
        export { a, b, c as x }
        // after
        Object.assign(__exports, { a, b, x: c })
    """
    def _rewrite(match):
        # strip the surrounding braces, rewrite each "orig as alias" entry
        entries = ", ".join(convert_as(part) for part in match["object"][1:-1].split(","))
        return "%sObject.assign(__exports, {%s})" % (match["space"], entries)
    return EXPORT_OBJECT_RE.sub(_rewrite, content)
|
||||
|
||||
|
||||
EXPORT_FROM_RE = re.compile(r"""
    ^
    (?P<space>\s*)                                 # space and empty line
    export\s*                                      # export
    (?P<object>{[\w\s,]+})\s*                      # { a, b, c as x, ... }
    from\s*                                        # from
    (?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote))  # "file path" ("some/path.js")
""", re.MULTILINE | re.VERBOSE)


def convert_from_export(content):
    """
    Transpile re-exports coming from another source.

    .. code-block:: javascript

        // before
        export { a, b, c as x } from "some/path.js"
        // after
        {const { a, b, c } = require("some/path.js");Object.assign(__exports, { a, b, x: c })}
    """
    def _rewrite(match):
        parts = match["object"][1:-1].split(",")
        # destructuring target: original names, aliases dropped
        required = "{" + ",".join(remove_as(part) for part in parts) + "}"
        # published object: aliases become keys ("c as x" -> "x: c")
        published = "{" + ", ".join(convert_as(part) for part in parts) + "}"
        return "%s{const %s = require(%s);Object.assign(__exports, %s)}" % (
            match["space"], required, match["path"], published)
    return EXPORT_FROM_RE.sub(_rewrite, content)
|
||||
|
||||
|
||||
EXPORT_STAR_FROM_RE = re.compile(r"""
    ^
    (?P<space>\s*)                                 # space and empty line
    export\s*\*\s*from\s*                          # export * from
    (?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote))  # "file path" ("some/path.js")
""", re.MULTILINE | re.VERBOSE)


def convert_star_from_export(content):
    """
    Transpile star re-exports coming from another source.

    .. code-block:: javascript

        // before
        export * from "some/path.js"
        // after
        Object.assign(__exports, require("some/path.js"))
    """
    return EXPORT_STAR_FROM_RE.sub(
        r"\g<space>Object.assign(__exports, require(\g<path>))", content)
|
||||
|
||||
|
||||
EXPORT_DEFAULT_RE = re.compile(r"""
    ^
    (?P<space>\s*)        # space and empty line
    export\s+default      # export default
    (\s+\w+\s*=)?         # something (optional)
""", re.MULTILINE | re.VERBOSE)


def convert_default_export(content):
    """
    Handle all the default exports.

    First delegates to the specialized converters (functions, classes,
    variables); any ``export default`` left after that gets a plain
    replacement.

    .. code-block:: javascript

        // before
        export default
        // after
        __exports[Symbol.for("default")] =

        // before
        export default something =
        // after
        __exports[Symbol.for("default")] =
    """
    for convert in (
        convert_export_function_default,
        convert_export_class_default,
        convert_variable_export_default,
    ):
        content = convert(content)
    return EXPORT_DEFAULT_RE.sub(
        r"""\g<space>__exports[Symbol.for("default")] =""", content)
|
||||
|
||||
|
||||
IMPORT_BASIC_RE = re.compile(r"""
    ^
    (?P<space>\s*)                                 # space and empty line
    import\s+                                      # import
    (?P<object>{[\s\w,]+})\s*                      # { a, b, c as x, ... }
    from\s*                                        # from
    (?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote))  # "file path" ("some/path")
""", re.MULTILINE | re.VERBOSE)


def convert_basic_import(content):
    """
    Transpile the simplest named-import statement.

    .. code-block:: javascript

        // before
        import { a, b, c as x } from "some/path"
        // after
        const { a, b, c: x } = require("some/path")
    """
    def _rewrite(match):
        # "orig as alias" becomes destructuring-with-rename "orig: alias"
        names = match["object"].replace(" as ", ": ")
        return "%sconst %s = require(%s)" % (match["space"], names, match["path"])
    return IMPORT_BASIC_RE.sub(_rewrite, content)
|
||||
|
||||
|
||||
IMPORT_LEGACY_DEFAULT_RE = re.compile(r"""
    ^
    (?P<space>\s*)                                           # space and empty line
    import\s+                                                # import
    (?P<identifier>\w+)\s*                                   # default variable name
    from\s*                                                  # from
    (?P<path>(?P<quote>["'`])([^@\."'`][^"'`]*)(?P=quote))   # legacy alias file ("addon_name.module_name" or "some/path")
""", re.MULTILINE | re.VERBOSE)


def convert_legacy_default_import(content):
    """
    Transpile legacy default-style imports.

    Legacy module names are not paths but ``<addon_name>.<module_name>``
    identifiers; the legacy module itself is the value to bind, so no
    default-export unwrapping is needed.

    .. code-block:: javascript

        // before
        import module_name from "addon.module_name"
        // after
        const module_name = require("addon.module_name")
    """
    return IMPORT_LEGACY_DEFAULT_RE.sub(
        r"""\g<space>const \g<identifier> = require(\g<path>)""", content)
|
||||
|
||||
|
||||
IMPORT_DEFAULT = re.compile(r"""
    ^
    (?P<space>\s*)                                 # space and empty line
    import\s+                                      # import
    (?P<identifier>\w+)\s*                         # default variable name
    from\s*                                        # from
    (?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote))  # "file path" ("some/path")
""", re.MULTILINE | re.VERBOSE)


def convert_default_import(content):
    """
    Transpile a default import.

    .. code-block:: javascript

        // before
        import something from "some/path"
        // after
        const something = require("some/path")[Symbol.for("default")]
    """
    return IMPORT_DEFAULT.sub(
        r"""\g<space>const \g<identifier> = require(\g<path>)[Symbol.for("default")]""",
        content)
|
||||
|
||||
|
||||
IS_PATH_LEGACY_RE = re.compile(r"""(?P<quote>["'`])([^@\."'`][^"'`]*)(?P=quote)""")

IMPORT_DEFAULT_AND_NAMED_RE = re.compile(r"""
    ^
    (?P<space>\s*)                                 # space and empty line
    import\s+                                      # import
    (?P<default_export>\w+)\s*,\s*                 # default variable name,
    (?P<named_exports>{[\s\w,]+})\s*               # { a, b, c as x, ... }
    from\s*                                        # from
    (?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote))  # "file path" ("some/path")
""", re.MULTILINE | re.VERBOSE)


def convert_default_and_named_import(content):
    """
    Transpile a combined default + named import on one line.

    .. code-block:: javascript

        // before
        import something, { a } from "some/path";
        import somethingElse, { b } from "legacy.module";
        // after
        const { [Symbol.for("default")]: something, a } = require("some/path");
        const somethingElse = require("legacy.module");
        const { b } = somethingElse;
    """
    def _rewrite(match):
        space = match["space"]
        default_name = match["default_export"]
        names = match["named_exports"].replace(" as ", ": ")
        if IS_PATH_LEGACY_RE.match(match["path"]):
            # legacy modules ARE the default value: bind it, then destructure it
            return (
                "%(s)sconst %(d)s = require(%(p)s);\n"
                "%(s)sconst %(o)s = %(d)s"
            ) % {"s": space, "d": default_name, "p": match["path"], "o": names}
        # fold the default binding into the same destructuring pattern
        combined = '{ [Symbol.for("default")]: %s,%s' % (default_name, names[1:])
        return "%sconst %s = require(%s)" % (space, combined, match["path"])
    return IMPORT_DEFAULT_AND_NAMED_RE.sub(_rewrite, content)
|
||||
|
||||
|
||||
RELATIVE_REQUIRE_RE = re.compile(r"""
    require\((?P<quote>["'`])([^@"'`]+)(?P=quote)\)  # require("some/path")
""", re.VERBOSE)


def convert_relative_require(url, content):
    """
    Convert the relative path contained in a 'require()'
    to the new path system (@module/path).

    .. code-block:: javascript

        // Relative path:
        // before
        require("./path")
        // after
        require("@module/path")

        // Not a relative path:
        // before
        require("other_alias")
        // after
        require("other_alias")

    :param url: url of the file in the project containing the require calls
    :param content: source code to rewrite
    :return: content with relative require paths converted to module paths
    """
    new_content = content
    for quote, path in RELATIVE_REQUIRE_RE.findall(new_content):
        # only rewrite genuinely relative paths ("./x", "../x")
        if path.startswith(".") and "/" in path:
            # escape the path: it is literal text, but characters such as
            # "." (or "+") would otherwise be treated as regex operators and
            # could make the substitution match unrelated require() calls
            pattern = rf"require\({quote}{re.escape(path)}{quote}\)"
            repl = f'require("{relative_path_to_module_path(url, path)}")'
            new_content = re.sub(pattern, repl, new_content)
    return new_content
|
||||
|
||||
|
||||
IMPORT_STAR = re.compile(r"""
    ^(?P<space>\s*)        # indentation
    import\s+\*\s+as\s+    # import * as
    (?P<identifier>\w+)    # alias
    \s*from\s*             # from
    (?P<path>[^;\n]+)      # path
""", re.MULTILINE | re.VERBOSE)


def convert_star_import(content):
    """
    Transpile a namespace ("star") import.

    .. code-block:: javascript

        // before
        import * as name from "some/path"
        // after
        const name = require("some/path")
    """
    return IMPORT_STAR.sub(
        r"\g<space>const \g<identifier> = require(\g<path>)", content)
|
||||
|
||||
|
||||
IMPORT_DEFAULT_AND_STAR = re.compile(r"""
    ^(?P<space>\s*)                     # indentation
    import\s+                           # import
    (?P<default_export>\w+)\s*,\s*      # default export name,
    \*\s+as\s+                          # * as
    (?P<named_exports_alias>\w+)        # alias
    \s*from\s*                          # from
    (?P<path>[^;\n]+)                   # path
""", re.MULTILINE | re.VERBOSE)


def convert_default_and_star_import(content):
    """
    Transpile a combined default + namespace ("star") import.

    .. code-block:: javascript

        // before
        import something, * as name from "some/path";
        // after
        const name = require("some/path");
        const something = name[Symbol.for("default")];
    """
    def _rewrite(match):
        return (
            '%(s)sconst %(alias)s = require(%(path)s);\n'
            '%(s)sconst %(default)s = %(alias)s[Symbol.for("default")]'
        ) % {
            "s": match["space"],
            "alias": match["named_exports_alias"],
            "path": match["path"],
            "default": match["default_export"],
        }
    return IMPORT_DEFAULT_AND_STAR.sub(_rewrite, content)
|
||||
|
||||
|
||||
IMPORT_UNNAMED_RELATIVE_RE = re.compile(r"""
    ^(?P<space>\s*)        # indentation
    import\s+              # import
    (?P<path>[^;\n]+)      # relative path
""", re.MULTILINE | re.VERBOSE)


def convert_unnamed_relative_import(content):
    """
    Transpile "direct" imports, i.e. imports whose value is not stored
    in a variable (side-effect imports).

    .. code-block:: javascript

        // before
        import "some/path"
        // after
        require("some/path")

    NOTE(review): the captured leading whitespace is intentionally not
    re-emitted in the replacement, matching the historical behaviour.
    """
    return IMPORT_UNNAMED_RELATIVE_RE.sub(r"require(\g<path>)", content)
|
||||
|
||||
|
||||
URL_INDEX_RE = re.compile(r"""
    require\s*                                               # require
    \(\s*                                                    # (
    (?P<path>(?P<quote>["'`])([^"'`]*/index/?)(?P=quote))    # path ended by /index or /index/
    \s*\)                                                    # )
""", re.MULTILINE | re.VERBOSE)


def remove_index(content):
    """
    Drop the trailing ``/index`` from require() paths.

    A module can be imported through its directory name alone when it
    contains an index.js, so the explicit ``/index`` suffix is removed.
    """
    def _rewrite(match):
        quoted = match["path"]
        # keep everything before "/index", then restore the closing quote
        # (quoted[0] is the opening quote character)
        trimmed = quoted[:quoted.rfind("/index")] + quoted[0]
        return "require(%s)" % trimmed
    return URL_INDEX_RE.sub(_rewrite, content)
|
||||
|
||||
|
||||
def relative_path_to_module_path(url, path_rel):
    """Convert the relative path into a module path, which is more generic and
    fancy.

    :param str url: url of the file containing the relative import
    :param path_rel: a relative path to the current url.
    :return: module path (@module/...)
    """
    url_split = url.split("/")
    path_rel_split = path_rel.split("/")
    # each ".." cancels one url segment; the extra +1 drops the file name itself
    nb_back = sum(1 for v in path_rel_split if v == "..") + 1
    result = "/".join(
        url_split[:-nb_back]
        + [v for v in path_rel_split if v not in ("..", ".")]
    )
    return url_to_module_path(result)
|
||||
|
||||
|
||||
ODOO_MODULE_RE = re.compile(r"""
    \s*                                       # some starting space
    \/(\*|\/).*\s*                            # // or /*
    @odoo-module                              # @odoo-module
    (\s+alias=(?P<alias>[\w.]+))?             # alias=web.AbstractAction (optional)
    (\s+default=(?P<default>False|false|0))?  # default=False or false or 0 (optional)
""", re.VERBOSE)


def is_odoo_module(content):
    """
    Detect whether the file is a native odoo module, i.e. whether its
    leading comment contains ``@odoo-module``.

    :param content: source code
    :return: is this an odoo module that needs transpilation?
    """
    return ODOO_MODULE_RE.match(content) is not None
|
||||
|
||||
|
||||
def get_aliased_odoo_define_content(module_path, content):
|
||||
"""
|
||||
To allow smooth transition between the new system and the legacy one, we have the possibility to
|
||||
defined an alternative module name (an alias) that will act as proxy between legacy require calls and
|
||||
new modules.
|
||||
|
||||
Example:
|
||||
If we have a require call somewhere in the odoo source base being:
|
||||
> vat AbstractAction require("web.AbstractAction")
|
||||
we have a problem when we will have converted to module to ES6: its new name will be more like
|
||||
"web/chrome/abstract_action". So the require would fail !
|
||||
So we add a second small modules, an alias, as such:
|
||||
> odoo.define("web/chrome/abstract_action", async function(require) {
|
||||
> return require('web.AbstractAction')[Symbol.for("default")];
|
||||
> });
|
||||
|
||||
To generate this, change your comment on the top of the file.
|
||||
|
||||
.. code-block:: javascript
|
||||
|
||||
// before
|
||||
/** @odoo-module */
|
||||
// after
|
||||
/** @odoo-module alias=web.AbstractAction */
|
||||
|
||||
Notice that often, the legacy system acted like they it did defaukt imports. That's why we have the
|
||||
"[Symbol.for("default")];" bit. If your use case does not need this default import, just do:
|
||||
|
||||
.. code-block:: javascript
|
||||
|
||||
// before
|
||||
/** @odoo-module */
|
||||
// after
|
||||
/** @odoo-module alias=web.AbstractAction default=false */
|
||||
|
||||
:return: the alias content to append to the source code.
|
||||
"""
|
||||
matchobj = ODOO_MODULE_RE.match(content)
|
||||
if matchobj:
|
||||
alias = matchobj['alias']
|
||||
if alias:
|
||||
if matchobj['default']:
|
||||
return """\nodoo.define(`%s`, async function(require) {
|
||||
return require('%s');
|
||||
});\n""" % (alias, module_path)
|
||||
else:
|
||||
return """\nodoo.define(`%s`, async function(require) {
|
||||
return require('%s')[Symbol.for("default")];
|
||||
});\n""" % (alias, module_path)
|
||||
|
||||
|
||||
def convert_as(val):
    """Rewrite a JS ``orig as alias`` clause into an object entry ``alias: orig``;
    values without an alias are returned unchanged."""
    pieces = val.split(" as ")
    if len(pieces) < 2:
        return val
    return "%s: %s" % tuple(reversed(pieces))
|
||||
|
||||
|
||||
def remove_as(val):
    """Strip the alias from a JS ``orig as alias`` clause, keeping ``orig``;
    values without an alias are returned unchanged."""
    # the head of the split IS the value when there is no " as " separator
    return val.split(" as ")[0]
|
||||
55
odoo-bringout-oca-ocb-base/odoo/tools/json.py
Normal file
55
odoo-bringout-oca-ocb-base/odoo/tools/json.py
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
import json as json_
|
||||
import re
|
||||
|
||||
import markupsafe
|
||||
|
||||
# Characters that are dangerous when JSON is embedded in an HTML/JS context,
# mapped to their JSON-level unicode escape. Escaping at the JSON level keeps
# the parsed value identical while neutralising the characters in the page.
JSON_SCRIPTSAFE_MAPPER = {
    '&': r'\u0026',
    '<': r'\u003c',
    '>': r'\u003e',
    '\u2028': r'\u2028',  # LINE SEPARATOR: a newline in JS but not in JSON
    '\u2029': r'\u2029'   # PARAGRAPH SEPARATOR: same issue as U+2028
}
class _ScriptSafe(str):
    # str subclass whose __html__ marks the serialised JSON as safe for
    # direct inclusion in markup (markupsafe will not escape it further)
    def __html__(self):
        # replacement can be done straight in the serialised JSON as the
        # problematic characters are not JSON metacharacters (and can thus
        # only occur in strings)
        return markupsafe.Markup(re.sub(
            r'[<>&\u2028\u2029]',
            lambda m: JSON_SCRIPTSAFE_MAPPER[m[0]],
            self,
        ))
|
||||
class JSON:
    # Thin wrapper around the stdlib json module whose dumps() output is
    # safe to embed in <script> elements (see dumps docstring).
    def loads(self, *args, **kwargs):
        # plain passthrough: loading needs no special treatment
        return json_.loads(*args, **kwargs)
    def dumps(self, *args, **kwargs):
        """ JSON used as JS in HTML (script tags) is problematic: <script>
        tags are a special context which only waits for </script> but doesn't
        interpret anything else, this means standard htmlescaping does not
        work (it breaks double quotes, and e.g. `&lt;` will become `<` *in
        the resulting JSON/JS* not just inside the page).

        However, failing to escape embedded json means the json strings could
        contains `</script>` and thus become XSS vector.

        The solution turns out to be very simple: use JSON-level unicode
        escapes for HTML-unsafe characters (e.g. "<" -> "\\u003C". This removes
        the XSS issue without breaking the json, and there is no difference to
        the end result once it's been parsed back from JSON. So it will work
        properly even for HTML attributes or raw text.

        Also handle U+2028 and U+2029 the same way just in case as these are
        interpreted as newlines in javascript but not in JSON, which could
        lead to oddities and issues.

        .. warning::

            except inside <script> elements, this should be escaped following
            the normal rules of the containing format

        Cf https://code.djangoproject.com/ticket/17419#comment:27
        """
        # the escaping itself is deferred to _ScriptSafe.__html__ so callers
        # that never embed the value in markup pay no extra cost
        return _ScriptSafe(json_.dumps(*args, **kwargs))
# module-level singleton: drop-in replacement for the json module
scriptsafe = JSON()
|
||||
59
odoo-bringout-oca-ocb-base/odoo/tools/lru.py
Normal file
59
odoo-bringout-oca-ocb-base/odoo/tools/lru.py
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
import collections
|
||||
import threading
|
||||
|
||||
from .func import locked
|
||||
|
||||
__all__ = ['LRU']
|
||||
|
||||
class LRU(object):
    """
    Implementation of a length-limited O(1) LRU map.

    Original Copyright 2003 Josiah Carlson, later rebuilt on OrderedDict.
    """
    def __init__(self, count, pairs=()):
        # reentrant lock so @locked methods can nest on the same thread
        # NOTE(review): assumes `locked` (from .func) acquires self._lock
        # around the decorated call -- confirm in odoo.tools.func
        self._lock = threading.RLock()
        # capacity; clamped to at least 1 so the map is always usable
        self.count = max(count, 1)
        # ordering convention: FRONT = most recently used, BACK = eviction end
        self.d = collections.OrderedDict()
        for key, value in pairs:
            self[key] = value

    @locked
    def __contains__(self, obj):
        return obj in self.d

    def get(self, obj, val=None):
        # EAFP: delegate to __getitem__ so a hit also refreshes recency
        try:
            return self[obj]
        except KeyError:
            return val

    @locked
    def __getitem__(self, obj):
        a = self.d[obj]
        # a successful read promotes the entry to most-recently-used (front)
        self.d.move_to_end(obj, last=False)
        return a

    @locked
    def __setitem__(self, obj, val):
        self.d[obj] = val
        self.d.move_to_end(obj, last=False)
        # evict least-recently-used entries (at the back) while over capacity
        while len(self.d) > self.count:
            self.d.popitem(last=True)

    @locked
    def __delitem__(self, obj):
        del self.d[obj]

    @locked
    def __len__(self):
        return len(self.d)

    @locked
    def pop(self,key):
        # remove and return the value for `key`; raises KeyError if missing
        return self.d.pop(key)

    @locked
    def clear(self):
        self.d.clear()
|
||||
886
odoo-bringout-oca-ocb-base/odoo/tools/mail.py
Normal file
886
odoo-bringout-oca-ocb-base/odoo/tools/mail.py
Normal file
|
|
@ -0,0 +1,886 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
import base64
|
||||
import collections
|
||||
import logging
|
||||
import random
|
||||
import re
|
||||
import socket
|
||||
import threading
|
||||
import time
|
||||
import email.utils
|
||||
from email.utils import getaddresses as orig_getaddresses
|
||||
from urllib.parse import urlparse
|
||||
import html as htmllib
|
||||
|
||||
import idna
|
||||
import markupsafe
|
||||
from lxml import etree, html
|
||||
from lxml.html import clean, defs
|
||||
from werkzeug import urls
|
||||
|
||||
import odoo
|
||||
from odoo.loglevels import ustr
|
||||
from odoo.tools import misc
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# disable strict mode when present: we rely on original non-strict
# parsing, and we know that it isn't reliable, that ok.
# cfr python/cpython@4a153a1d3b18803a684cd1bcc2cdf3ede3dbae19
if hasattr(email.utils, 'supports_strict_parsing'):
    # newer Pythons (and security backports) parse addresses strictly by
    # default; explicitly opt out to keep the historical permissive behaviour
    def getaddresses(fieldvalues):
        return orig_getaddresses(fieldvalues, strict=False)
else:
    # older Pythons have no strict mode: use the original function as-is
    getaddresses = orig_getaddresses
|
||||
|
||||
|
||||
#----------------------------------------------------------
|
||||
# HTML Sanitizer
|
||||
#----------------------------------------------------------
|
||||
|
||||
# HTML attributes kept by the sanitizer, on top of lxml's default safe set.
safe_attrs = defs.safe_attrs | frozenset(
    ['style',
     'data-o-mail-quote', 'data-o-mail-quote-node',  # quote detection
     'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translation-initial-sha', 'data-oe-nodeid',
     'data-last-history-steps', 'data-width', 'data-height', 'data-scale-x', 'data-scale-y', 'data-x', 'data-y',
     'data-publish', 'data-id', 'data-res_id', 'data-interval', 'data-member_id', 'data-scroll-background-ratio', 'data-view-id',
     'data-class', 'data-mimetype', 'data-original-src', 'data-original-id', 'data-gl-filter', 'data-quality', 'data-resize-width',
     'data-shape', 'data-shape-colors', 'data-file-name', 'data-original-mimetype',
     'data-oe-protected',  # editor
     'data-behavior-props', 'data-prop-name',  # knowledge commands
     ])
# Tag policy for the sanitizer:
# - allow_tags: kept as-is (lxml defaults + HTML5 semantic tags + comments)
# - kill_tags: removed together with their content
# - remove_tags: unwrapped (tag dropped, content kept)
SANITIZE_TAGS = {
    # allow new semantic HTML5 tags
    'allow_tags': defs.tags | frozenset('article bdi section header footer hgroup nav aside figure main'.split() + [etree.Comment]),
    'kill_tags': ['base', 'embed', 'frame', 'head', 'iframe', 'link', 'meta',
                  'noscript', 'object', 'script', 'style', 'title'],
    'remove_tags': ['html', 'body'],
}
|
||||
|
||||
|
||||
class _Cleaner(clean.Cleaner):
    # lxml Cleaner extended with optional class stripping and inline-style
    # whitelisting, applied after the standard cleaning pass.

    # parses "property: value" pairs out of a style attribute; values may
    # contain quoted strings (which themselves may contain ";")
    _style_re = re.compile(r'''([\w-]+)\s*:\s*((?:[^;"']|"[^";]*"|'[^';]*')+)''')

    # CSS properties preserved when sanitize_style is enabled; anything
    # else is dropped from the style attribute
    _style_whitelist = [
        'font-size', 'font-family', 'font-weight', 'font-style', 'background-color', 'color', 'text-align',
        'line-height', 'letter-spacing', 'text-transform', 'text-decoration', 'text-decoration', 'opacity',
        'float', 'vertical-align', 'display',
        'padding', 'padding-top', 'padding-left', 'padding-bottom', 'padding-right',
        'margin', 'margin-top', 'margin-left', 'margin-bottom', 'margin-right',
        'white-space',
        # box model
        'border', 'border-color', 'border-radius', 'border-style', 'border-width', 'border-top', 'border-bottom',
        'height', 'width', 'max-width', 'min-width', 'min-height',
        # tables
        'border-collapse', 'border-spacing', 'caption-side', 'empty-cells', 'table-layout']

    # generate the per-side border properties (border-top-color, ...)
    _style_whitelist.extend(
        ['border-%s-%s' % (position, attribute)
            for position in ['top', 'bottom', 'left', 'right']
            for attribute in ('style', 'color', 'width', 'left-radius', 'right-radius')]
    )

    # extra toggles consumed by __call__ (set by the caller on the instance)
    strip_classes = False
    sanitize_style = False

    def __call__(self, doc):
        # run the standard lxml cleaning first, then our extra passes
        super(_Cleaner, self).__call__(doc)

        # if we keep attributes but still remove classes
        if not getattr(self, 'safe_attrs_only', False) and self.strip_classes:
            for el in doc.iter(tag=etree.Element):
                self.strip_class(el)

        # if we keep style attribute, sanitize them
        if not self.style and self.sanitize_style:
            for el in doc.iter(tag=etree.Element):
                self.parse_style(el)

    def strip_class(self, el):
        # drop the class attribute entirely when present
        if el.attrib.get('class'):
            del el.attrib['class']

    def parse_style(self, el):
        # rebuild the style attribute keeping only whitelisted properties;
        # remove it entirely when nothing survives
        attributes = el.attrib
        styling = attributes.get('style')
        if styling:
            valid_styles = collections.OrderedDict()
            styles = self._style_re.findall(styling)
            for style in styles:
                if style[0].lower() in self._style_whitelist:
                    valid_styles[style[0].lower()] = style[1]
            if valid_styles:
                el.attrib['style'] = '; '.join('%s:%s' % (key, val) for (key, val) in valid_styles.items())
            else:
                del el.attrib['style']
|
||||
|
||||
|
||||
def tag_quote(el):
    """
    Mark ``el`` (and possibly its relatives) with ``data-o-mail-quote``
    attributes when it looks like quoted/forwarded content or a signature.

    Detection is based on vendor-specific class/id markers (gmail, yahoo,
    outlook, msoffice) and on textual patterns (``>`` quote prefixes,
    ``--`` signature separators). Mutates the lxml tree in place.
    """
    def _create_new_node(tag, text, tail=None, attrs=None):
        # build a fresh element carrying `text`/`tail` and optional attributes
        new_node = etree.Element(tag)
        new_node.text = text
        new_node.tail = tail
        if attrs:
            for key, val in attrs.items():
                new_node.set(key, val)
        return new_node

    def _tag_matching_regex_in_text(regex, node, tag='span', attrs=None):
        # wrap every match of `regex` inside node.text in a new child element,
        # splitting the surrounding text across text/tail slots
        text = node.text or ''
        if not re.search(regex, text):
            return

        child_node = None
        idx, node_idx = 0, 0
        for item in re.finditer(regex, text):
            new_node = _create_new_node(tag, text[item.start():item.end()], None, attrs)
            if child_node is None:
                # first match: the prefix stays on the parent's text
                node.text = text[idx:item.start()]
                new_node.tail = text[item.end():]
                node.insert(node_idx, new_node)
            else:
                # later matches: the gap becomes the previous child's tail
                child_node.tail = text[idx:item.start()]
                new_node.tail = text[item.end():]
                node.insert(node_idx, new_node)
            child_node = new_node
            idx = item.end()
            node_idx = node_idx + 1

    el_class = el.get('class', '') or ''
    el_id = el.get('id', '') or ''

    # gmail or yahoo // # outlook, html // # msoffice
    # fixed: the second SkyDrivePlaceholder test was a duplicate on el_class;
    # it must check el_id as well
    if 'gmail_extra' in el_class or \
            ('SkyDrivePlaceholder' in el_class or 'SkyDrivePlaceholder' in el_id):
        el.set('data-o-mail-quote', '1')
        if el.getparent() is not None:
            el.getparent().set('data-o-mail-quote-container', '1')

    if (el.tag == 'hr' and ('stopSpelling' in el_class or 'stopSpelling' in el_id)) or \
            'yahoo_quoted' in el_class:
        # Quote all elements after this one
        el.set('data-o-mail-quote', '1')
        for sibling in el.itersiblings(preceding=False):
            sibling.set('data-o-mail-quote', '1')

    # odoo, gmail and outlook automatic signature wrapper
    is_signature_wrapper = 'odoo_signature_wrapper' in el_class or 'gmail_signature' in el_class or el_id == "Signature"
    is_outlook_auto_message = 'appendonsend' in el_id
    # gmail and outlook reply quote
    is_outlook_reply_quote = 'divRplyFwdMsg' in el_id
    is_gmail_quote = 'gmail_quote' in el_class
    is_quote_wrapper = is_signature_wrapper or is_gmail_quote or is_outlook_reply_quote
    if is_quote_wrapper:
        el.set('data-o-mail-quote-container', '1')
        el.set('data-o-mail-quote', '1')

    # outlook reply wrapper is preceded with <hr> and a div containing recipient info
    if is_outlook_reply_quote:
        hr = el.getprevious()
        reply_quote = el.getnext()
        if hr is not None and hr.tag == 'hr':
            hr.set('data-o-mail-quote', '1')
        if reply_quote is not None:
            reply_quote.set('data-o-mail-quote-container', '1')
            reply_quote.set('data-o-mail-quote', '1')

    if is_outlook_auto_message:
        # only quote the wrapper when it carries no text of its own
        if not el.text or not el.text.strip():
            el.set('data-o-mail-quote-container', '1')
            el.set('data-o-mail-quote', '1')

    # html signature (-- <br />blah)
    signature_begin = re.compile(r"((?:(?:^|\n)[-]{2}[\s]?$))")
    if el.text and el.find('br') is not None and re.search(signature_begin, el.text):
        el.set('data-o-mail-quote', '1')
        if el.getparent() is not None:
            el.getparent().set('data-o-mail-quote-container', '1')

    # text-based quotes (>, >>) and signatures (-- Signature)
    text_complete_regex = re.compile(r"((?:\n[>]+[^\n\r]*)+|(?:(?:^|\n)[-]{2}[\s]?[\r\n]{1,2}[\s\S]+))")
    if not el.get('data-o-mail-quote'):
        _tag_matching_regex_in_text(text_complete_regex, el, 'span', {'data-o-mail-quote': '1'})

    if el.tag == 'blockquote':
        # remove single node
        el.set('data-o-mail-quote-node', '1')
        el.set('data-o-mail-quote', '1')
    # propagate the quote flag from a quoted parent (unless the parent is a
    # standalone quote node) ...
    if el.getparent() is not None and (el.getparent().get('data-o-mail-quote') or el.getparent().get('data-o-mail-quote-container')) and not el.getparent().get('data-o-mail-quote-node'):
        el.set('data-o-mail-quote', '1')
    # ... or from a quoted preceding sibling when this element is empty
    if el.getprevious() is not None and el.getprevious().get('data-o-mail-quote') and not el.text_content().strip():
        el.set('data-o-mail-quote', '1')
|
||||
|
||||
|
||||
def html_normalize(src, filter_callback=None):
    """ Normalize `src` for storage as an html field value.

    The string is parsed as an html tag soup, made valid, then decorated for
    "email quote" detection, and prepared for an optional filtering.
    The filtering step (e.g. sanitization) should be performed by the
    `filter_callback` function (to avoid multiple parsing operations, and
    normalize the result).

    :param src: the html string to normalize
    :param filter_callback: optional callable taking a single `etree._Element`
        document parameter, to be called during normalization in order to
        filter the output document
    :return: the normalized html string ('' for comment/whitespace-only input)
    """

    if not src:
        return src

    src = ustr(src, errors='replace')
    # html: remove encoding attribute inside tags
    doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
    src = doctype.sub(u"", src)

    try:
        # repair malformed comment closers and empty comments before parsing
        src = src.replace('--!>', '-->')
        src = re.sub(r'(<!-->|<!--->)', '<!-- -->', src)
        # On the specific case of Outlook desktop it adds unnecessary '<o:.*></o:.*>' tags which are parsed
        # in '<p></p>' which may alter the appearance (eg. spacing) of the mail body
        src = re.sub(r'</?o:.*?>', '', src)
        doc = html.fromstring(src)
    except etree.ParserError as e:
        # HTML comment only string, whitespace only..
        if 'empty' in str(e):
            return u""
        raise

    # perform quote detection before cleaning and class removal
    if doc is not None:
        for el in doc.iter(tag=etree.Element):
            tag_quote(el)

    if filter_callback:
        doc = filter_callback(doc)

    src = html.tostring(doc, encoding='unicode')

    # this is ugly, but lxml/etree tostring want to put everything in a
    # 'div' that breaks the editor -> remove that
    if src.startswith('<div>') and src.endswith('</div>'):
        src = src[5:-6]

    # html considerations so real html content match database value: store
    # non-breaking spaces as their entity form (the literal replacement had
    # been garbled to a plain space by entity decoding)
    src = src.replace(u'\xa0', u'&nbsp;')

    return src
|
||||
|
||||
|
||||
def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=False, sanitize_style=False, sanitize_form=True, strip_style=False, strip_classes=False):
    """ Sanitize an html string, returning a `markupsafe.Markup`.

    Parsing/normalization is delegated to :func:`html_normalize`; this
    function only configures and applies the `_Cleaner` filter.

    :param src: html string to sanitize
    :param bool silent: when True (default) parsing/cleaning errors are
        swallowed and a placeholder '<p>...</p>' markup is returned instead
        of raising
    :param bool sanitize_tags: apply the module-level SANITIZE_TAGS rules
    :param bool sanitize_attributes: keep only attributes listed in
        `safe_attrs` (minus 'class' when strip_classes is set)
    :param bool sanitize_style: sanitize style attributes content
    :param bool sanitize_form: remove form tags
    :param bool strip_style: remove style tags/attributes entirely
    :param bool strip_classes: remove class attributes
    :rtype: markupsafe.Markup
    """
    if not src:
        return src

    logger = logging.getLogger(__name__ + '.html_sanitize')

    def sanitize_handler(doc):
        # build the lxml Cleaner configuration from the function flags
        kwargs = {
            'page_structure': True,
            'style': strip_style,              # True = remove style tags/attrs
            'sanitize_style': sanitize_style,  # True = sanitize styling
            'forms': sanitize_form,            # True = remove form tags
            'remove_unknown_tags': False,
            'comments': False,
            'processing_instructions': False
        }
        if sanitize_tags:
            kwargs.update(SANITIZE_TAGS)

        if sanitize_attributes:  # We keep all attributes in order to keep "style"
            if strip_classes:
                current_safe_attrs = safe_attrs - frozenset(['class'])
            else:
                current_safe_attrs = safe_attrs
            kwargs.update({
                'safe_attrs_only': True,
                'safe_attrs': current_safe_attrs,
            })
        else:
            kwargs.update({
                'safe_attrs_only': False,  # keep oe-data attributes + style
                'strip_classes': strip_classes,  # remove classes, even when keeping other attributes
            })

        cleaner = _Cleaner(**kwargs)
        cleaner(doc)
        return doc

    try:
        sanitized = html_normalize(src, filter_callback=sanitize_handler)
    except etree.ParserError:
        if not silent:
            raise
        logger.warning(u'ParserError obtained when sanitizing %r', src, exc_info=True)
        sanitized = '<p>ParserError when sanitizing</p>'
    except Exception:
        if not silent:
            raise
        logger.warning(u'unknown error obtained when sanitizing %r', src, exc_info=True)
        sanitized = '<p>Unknown error when sanitizing</p>'

    return markupsafe.Markup(sanitized)
|
||||
|
||||
# ----------------------------------------------------------
|
||||
# HTML/Text management
|
||||
# ----------------------------------------------------------
|
||||
|
||||
# matches an href attribute, excluding mailto/tel/sms schemes; group 2
# captures the bare url
URL_REGEX = r'(\bhref=[\'"](?!mailto:|tel:|sms:)([^\'"]+)[\'"])'
# matches a bare http(s) url in plain text, optional query string included
TEXT_URL_REGEX = r'https?://[\w@:%.+&~#=/-]+(?:\?\S+)?'
# retrieve inner content of the link
HTML_TAG_URL_REGEX = URL_REGEX + r'([^<>]*>([^<>]+)<\/)?'
# any html tag, non-greedy
HTML_TAGS_REGEX = re.compile('<.*?>')
# opening block-level / line-breaking tags, or literal newlines
HTML_NEWLINES_REGEX = re.compile('<(div|p|br|tr)[^>]*>|\n')
|
||||
|
||||
|
||||
def validate_url(url):
    """Ensure `url` carries an explicit web scheme, prefixing 'http://' when
    it carries none of the supported ones."""
    scheme = urls.url_parse(url).scheme
    if scheme in ('http', 'https', 'ftp', 'ftps'):
        return url
    return 'http://' + url
|
||||
|
||||
|
||||
def is_html_empty(html_content):
    """Check whether an html content is effectively empty.

    True is returned when the content is falsy or contains nothing but void
    formatting tags (possibly carrying style attributes), a famous use case
    being the '<p style="..."><br></p>' some web editors insert.

    :param str html_content: html content, coming for example from an HTML field
    :returns: bool, True if no content found or only void formatting tags
    """
    if not html_content:
        return True
    icon_re = r'<\s*(i|span)\b(\s+[A-Za-z_-][A-Za-z0-9-_]*(\s*=\s*[\'"][^"\']*[\'"])?)*\s*\bclass\s*=\s*["\'][^"\']*\b(fa|fab|fad|far|oi)\b'
    tag_re = r'<\s*\/?(?:p|div|section|span|br|b|i|font)\b(?:(\s+[A-Za-z_-][A-Za-z0-9-_]*(\s*=\s*[\'"][^"\']*[\'"]))*)(?:\s*>|\s*\/\s*>)'
    if re.search(icon_re, html_content):
        # a font-awesome / open-iconic icon counts as visible content
        return False
    remainder = re.sub(tag_re, '', html_content).strip()
    return not remainder
|
||||
|
||||
|
||||
def html_keep_url(text):
    """ Transform the url into clickable link with <a/> tag """
    link_tags = re.compile(r"""(?<!["'])((ftp|http|https):\/\/(\w+:{0,1}\w*@)?([^\s<"']+)(:[0-9]+)?(\/|\/([^\s<"']))?)(?![^\s<"']*["']|[^\s<"']*</a>)""")
    pieces = []
    cursor = 0
    # interleave the untouched segments with <a/>-wrapped urls
    for match in link_tags.finditer(text):
        pieces.append(text[cursor:match.start()])
        pieces.append(create_link(match.group(0), match.group(0)))
        cursor = match.end()
    pieces.append(text[cursor:])
    return ''.join(pieces)
|
||||
|
||||
|
||||
def html_to_inner_content(html):
    """Return unformatted text from a string/Markup: html tags removed,
    whitespace collapsed, entities unescaped. Plain (non-Markup) strings are
    sanitized first.
    """
    if is_html_empty(html):
        return ''
    if not isinstance(html, markupsafe.Markup):
        html = html_sanitize(html)
    # line-breaking tags become spaces, every other tag disappears
    text = re.sub(HTML_NEWLINES_REGEX, ' ', html)
    text = re.sub(HTML_TAGS_REGEX, '', text)
    # collapse runs of spaces / tabs
    text = re.sub(r' {2,}|\t', ' ', text)
    text = htmllib.unescape(text)
    return text.strip()
|
||||
|
||||
|
||||
def create_link(url, label):
    """Return an html anchor opening `url` in a new tab, labelled `label`."""
    return '<a href="%s" target="_blank" rel="noreferrer noopener">%s</a>' % (url, label)
|
||||
|
||||
|
||||
def html2plaintext(html, body_id=None, encoding='utf-8'):
    """ From an HTML text, convert the HTML to plain text.
    If @param body_id is provided then this is the tag where the
    body (not necessarily <body>) starts.

    :param str html: html content to convert
    :param str body_id: optional id of the element holding the actual body
    :param str encoding: encoding used when re-serializing the parsed tree
    :return str: plain-text rendition of ``html``
    """
    ## (c) Fry-IT, www.fry-it.com, 2007
    ## <peter@fry-it.com>
    ## download here: http://www.peterbe.com/plog/html2plaintext

    html = ustr(html)

    if not html.strip():
        return ''

    tree = etree.fromstring(html, parser=etree.HTMLParser())

    if body_id is not None:
        source = tree.xpath('//*[@id=%s]' % (body_id,))
    else:
        source = tree.xpath('//body')
    if len(source):
        tree = source[0]

    # replace links and images by an indexed "text [n]" reference; urls are
    # appended at the end of the output
    url_index = []
    i = 0
    for link in tree.findall('.//a'):
        url = link.get('href')
        if url:
            i += 1
            link.tag = 'span'
            link.text = '%s [%s]' % (link.text, i)
            url_index.append(url)

    for img in tree.findall('.//img'):
        src = img.get('src')
        if src:
            i += 1
            img.tag = 'span'
            if src.startswith('data:'):
                img_name = None  # base64 image
            else:
                img_name = re.search(r'[^/]+(?=\.[a-zA-Z]+(?:\?|$))', src)
            img.text = '%s [%s]' % (img_name.group(0) if img_name else 'Image', i)
            url_index.append(src)

    html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')

    html = html.replace('<strong>', '*').replace('</strong>', '*')
    html = html.replace('<b>', '*').replace('</b>', '*')
    html = html.replace('<h3>', '*').replace('</h3>', '*')
    html = html.replace('<h2>', '**').replace('</h2>', '**')
    html = html.replace('<h1>', '**').replace('</h1>', '**')
    html = html.replace('<em>', '/').replace('</em>', '/')
    html = html.replace('<tr>', '\n')
    html = html.replace('</p>', '\n')
    html = re.sub(r'<br\s*/?>', '\n', html)
    html = re.sub('<.*?>', ' ', html)
    html = html.replace(' ' * 2, ' ')
    # unescape entities produced by tostring(); '&amp;' must come last so
    # freshly decoded '&' cannot recombine into spurious entities
    # (these replacements had been garbled into no-ops by entity decoding)
    html = html.replace('&gt;', '>')
    html = html.replace('&lt;', '<')
    html = html.replace('&amp;', '&')
    html = html.replace('&nbsp;', u'\N{NO-BREAK SPACE}')

    # strip all lines
    html = '\n'.join([x.strip() for x in html.splitlines()])
    html = html.replace('\n' * 2, '\n')

    for i, url in enumerate(url_index):
        if i == 0:
            html += '\n\n'
        html += ustr('[%s] %s\n') % (i + 1, url)

    return html.strip()
|
||||
|
||||
def plaintext2html(text, container_tag=None):
    r"""Convert plaintext into html, escaping the content with
    :func:`~odoo.tools.misc.html_escape`.

    - every ``\n`` / ``\r`` is turned into ``<br/>``
    - content is enclosed into ``<p>`` paragraphs
    - urls become clickable links
    - 2 or more consecutive ``<br/>`` mark a paragraph break

    :param str text: plaintext to convert
    :param str container_tag: container of the html; by default the content is
        embedded into a ``<div>``
    :rtype: markupsafe.Markup
    """
    text = misc.html_escape(ustr(text))

    # 1. newlines -> line breaks
    text = re.sub(r'(\r\n|\r|\n)', '<br/>', text)

    # 2. clickable links
    text = html_keep_url(text)

    # 3-4: paragraphs, split on runs of 2+ <br/>
    br_tags = re.compile(r'(([<]\s*[bB][rR]\s*/?[>]\s*){2,})')
    segments = []
    cursor = 0
    for match in br_tags.finditer(text):
        segments.append(text[cursor:match.start()])
        cursor = match.end()
    segments.append(text[cursor:])
    final = '<p>' + '</p><p>'.join(segments) + '</p>'

    # 5. container
    if container_tag:  # FIXME: validate that container_tag is just a simple tag?
        final = '<%s>%s</%s>' % (container_tag, final, container_tag)
    return markupsafe.Markup(final)
|
||||
|
||||
def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=None):
    """ Append extra content at the end of an HTML snippet, trying
    to locate the end of the HTML document (</body>, </html>, or
    EOF), and converting the provided content in html unless ``plaintext``
    is False.

    Content conversion can be done in two ways:

    - wrapping it into a pre (``preserve=True``)
    - use plaintext2html (``preserve=False``, using ``container_tag`` to
      wrap the whole content)

    A side-effect of this method is to coerce all HTML tags to
    lowercase in ``html``, and strip enclosing <html> or <body> tags in
    content if ``plaintext`` is False.

    :param str html: html tagsoup (doesn't have to be XHTML)
    :param str content: extra content to append
    :param bool plaintext: whether content is plaintext and should
        be wrapped in a <pre/> tag.
    :param bool preserve: if content is plaintext, wrap it into a <pre>
        instead of converting it into html
    :param str container_tag: tag to wrap the content into, defaults to `div`.
    :rtype: markupsafe.Markup
    """
    html = ustr(html)
    if plaintext:
        if preserve:
            content = u'\n<pre>%s</pre>\n' % misc.html_escape(ustr(content))
        else:
            content = '\n%s\n' % plaintext2html(content, container_tag)
    else:
        # strip any enclosing document-level tags from the snippet
        content = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', content)
        content = u'\n%s\n' % ustr(content)
    # Force all tags to lowercase
    html = re.sub(r'(</?)(\w+)([ >])',
                  lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
    # insert right before </body>, else right before </html>, else at EOF
    insert_location = html.find('</body>')
    if insert_location == -1:
        insert_location = html.find('</html>')
    if insert_location == -1:
        return markupsafe.Markup(html + content)
    return markupsafe.Markup(html[:insert_location] + content + html[insert_location:])
|
||||
|
||||
|
||||
def prepend_html_content(html_body, html_content):
    """Prepend some HTML content at the beginning of an other HTML content."""
    # strip document-level tags from the snippet to insert
    stripped = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', html_content)
    if isinstance(html_content, markupsafe.Markup):
        stripped = markupsafe.Markup(stripped)
    html_content = stripped.strip()

    # insert just after <body ...> when present, else after <html ...>,
    # else at the very beginning
    anchor = re.search(r'<body[^>]*>', html_body) or re.search(r'<html[^>]*>', html_body)
    insert_index = anchor.end() if anchor else 0

    return html_body[:insert_index] + html_content + html_body[insert_index:]
|
||||
|
||||
#----------------------------------------------------------
|
||||
# Emails
|
||||
#----------------------------------------------------------
|
||||
|
||||
# matches any email in a body of text
email_re = re.compile(r"""([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,63})""", re.VERBOSE)

# matches a string containing only one email
single_email_re = re.compile(r"""^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,63}$""", re.VERBOSE)

# matches one '<...>' Message-ID token inside a mail header value
mail_header_msgid_re = re.compile('<[^<>]+>')

# characters that must be backslash-escaped inside a quoted display name
# (rfc2822 quoted-string)
email_addr_escapes_re = re.compile(r'[\\"]')
|
||||
|
||||
def generate_tracking_message_id(res_id):
    """Returns a string that can be used in the Message-ID RFC822 header field

    Used to track the replies related to a given object thanks to the
    "In-Reply-To" or "References" fields that Mail User Agents will set.
    """
    try:
        # prefer the OS entropy source when available
        rnd = random.SystemRandom().random()
    except NotImplementedError:
        rnd = random.random()
    # 15 decimals of the random value, without the leading "0."
    random_suffix = ("%.15f" % rnd)[2:]
    return "<%s.%.15f-openerp-%s@%s>" % (random_suffix, time.time(), res_id, socket.gethostname())
|
||||
|
||||
def email_split_tuples(text):
    """ Return a list of (name, email) address tuples found in ``text``. Note
    that text should be an email header or a stringified email list as it may
    give broader results than expected on actual text. """
    def _fix_unquoted_name(pair):
        """ With input 'name email@domain.com' (missing quotes for a
        formatting) getaddresses returns ('', 'name email@domain.com'). When
        a pair has no name and its email contains spaces, re-run getaddresses
        with spaces turned into commas: the sub-pairs without '@' rebuild the
        name, the one with '@' is the actual email. Emails should not contain
        spaces, so this stays coherent with email formation. """
        name, email = pair
        if name or not email or ' ' not in email:
            return (name, email)
        name_words, found_email = [], False
        for _sub_name, sub_email in getaddresses([email.replace(' ', ',')]):
            if sub_email and '@' not in sub_email:
                name_words.append(sub_email)
            if sub_email and '@' in sub_email:
                found_email = sub_email
        if found_email:
            return (' '.join(name_words), found_email)
        return (name, email)

    if not text:
        return []

    # found valid pairs, filtering out failed parsing: getaddresses() returns
    # '' when email parsing fails, and sometimes returns emails without at
    # least '@'. The '@' is strictly required in RFC2822's `addr-spec`.
    valid_pairs = [
        (name, email) for name, email in getaddresses([text])
        if email and '@' in email
    ]
    # corner case: returning '@gmail.com'-like email (see test_email_split)
    if any(email.startswith('@') for _name, email in valid_pairs):
        candidates = [
            found_email for found_email in email_re.findall(text)
            if found_email and not found_email.startswith('@')
        ]
        if candidates:
            valid_pairs = [('', found_email) for found_email in candidates]

    return [_fix_unquoted_name(pair) for pair in valid_pairs]
|
||||
|
||||
def email_split(text):
    """ Return a list of the email addresses found in ``text`` """
    if not text:
        return []
    return [email for _name, email in email_split_tuples(text)]
|
||||
|
||||
def email_split_and_format(text):
    """ Return a list of email addresses found in ``text``, formatted using
    formataddr. """
    if not text:
        return []
    return [formataddr(pair) for pair in email_split_tuples(text)]
|
||||
|
||||
def email_split_and_format_normalize(text):
    """ Same as 'email_split_and_format' but normalizing email. """
    return [
        formataddr((name, _normalize_email(email)))
        for name, email in email_split_tuples(text)
    ]
|
||||
|
||||
def email_normalize(text, strict=True):
    """ Sanitize and standardize email address entries.

    Local part: rfc5322 section 3.4.1 makes it case-sensitive, but most main
    providers consider it case-insensitive, so ascii local parts are
    lowercased while non-ascii ones are kept as-is (SMTP-UTF8 is made for
    non-ascii local parts). Domain part: always lowercased; as of v14
    international domains (IDNA) are handled fine, 'idna.encode' providing an
    ascii encoding for them.

    A normalized email is considered as:
    - having a left part + @ + a right part (the domain can be without
      '.something')
    - having no name before the address, typically no 'Name <>'
    Ex:
    - Possible Input Email : 'Name <NaMe@DoMaIn.CoM>'
    - Normalized Output Email : 'name@domain.com'

    :param boolean strict: if True, text should contain a single email
        (default behavior in stable 14+). If more than one email is found no
        normalized email is returned. If False the first found candidate is
        used e.g. if email is 'tony@e.com, "Tony2" <tony2@e.com>', result is
        either False (strict=True), either 'tony@e.com' (strict=False).

    :return: False if no email found (or if more than 1 email found when
        being in strict mode); normalized email otherwise;
    """
    found = email_split(text)
    if not found:
        return False
    if strict and len(found) != 1:
        return False
    return _normalize_email(found[0])
|
||||
|
||||
def email_normalize_all(text):
    """ Tool method allowing to extract email addresses from a text input and
    returning normalized version of all found emails. If no email is found, a
    void list is returned.

    e.g. if email is 'tony@e.com, "Tony2" <tony2@e.com' returned result is
    ['tony@e.com, tony2@e.com']

    :return list: list of normalized emails found in text
    """
    if not text:
        return []
    normalized = (_normalize_email(email) for email in email_split(text))
    return [email for email in normalized if email]
|
||||
|
||||
def _normalize_email(email):
|
||||
""" As of rfc5322 section 3.4.1 local-part is case-sensitive. However most
|
||||
main providers do consider the local-part as case insensitive. With the
|
||||
introduction of smtp-utf8 within odoo, this assumption is certain to fall
|
||||
short for international emails. We now consider that
|
||||
|
||||
* if local part is ascii: normalize still 'lower' ;
|
||||
* else: use as it, SMTP-UF8 is made for non-ascii local parts;
|
||||
|
||||
Concerning domain part of the address, as of v14 international domain (IDNA)
|
||||
are handled fine. The domain is always lowercase, lowering it is fine as it
|
||||
is probably an error. With the introduction of IDNA, there is an encoding
|
||||
that allow non-ascii characters to be encoded to ascii ones, using 'idna.encode'.
|
||||
|
||||
A normalized email is considered as :
|
||||
- having a left part + @ + a right part (the domain can be without '.something')
|
||||
- having no name before the address. Typically, having no 'Name <>'
|
||||
Ex:
|
||||
- Possible Input Email : 'Name <NaMe@DoMaIn.CoM>'
|
||||
- Normalized Output Email : 'name@domain.com'
|
||||
"""
|
||||
local_part, at, domain = email.rpartition('@')
|
||||
try:
|
||||
local_part.encode('ascii')
|
||||
except UnicodeEncodeError:
|
||||
pass
|
||||
else:
|
||||
local_part = local_part.lower()
|
||||
return local_part + at + domain.lower()
|
||||
|
||||
def email_anonymize(normalized_email, *, redact_domain=False):
    """
    Replace most charaters in the local part of the email address with
    '*' to hide the recipient, but keep enough characters for debugging
    purpose.

    The email address must be normalized already.

    >>> email_anonymize('admin@example.com')
    'a****@example.com'
    >>> email_anonymize('portal@example.com')
    'p***al@example.com'
    >>> email_anonymize('portal@example.com', redact_domain=True)
    'p***al@e******.com'
    """
    if not normalized_email:
        return normalized_email

    local, at, domain = normalized_email.partition('@')
    # always keep the first char; for locals longer than 5 also keep the
    # last two
    visible_tail = local[-2:] if len(local) > 5 else ''
    anon_local = local[:1] + '*' * (len(local) - 1 - len(visible_tail)) + visible_tail

    host, dot, tld = domain.rpartition('.')
    anon_host = host
    if redact_domain and not domain.startswith('[') and all((host, dot, tld)):
        # never redact IP-literal domains or dotless hosts
        anon_host = host[0] + '*' * (len(host) - 1)

    return f'{anon_local}{at}{anon_host}{dot}{tld}'
|
||||
|
||||
def email_domain_extract(email):
    """ Extract the company domain to be used by IAP services notably. Domain
    is extracted from email information e.g:

    - info@proximus.be -> proximus.be

    Returns False when the email cannot be normalized.
    """
    normalized = email_normalize(email)
    if not normalized:
        return False
    return normalized.split('@')[1]
|
||||
|
||||
def email_domain_normalize(domain):
    """Return the domain normalized or False if the domain is invalid
    (falsy, or containing an '@' and thus a full address)."""
    if not domain:
        return False
    if '@' in domain:
        return False
    return domain.lower()
|
||||
|
||||
def url_domain_extract(url):
    """ Extract the company domain to be used by IAP services notably. Domain
    is extracted from an URL e.g:

    - www.info.proximus.be -> proximus.be

    Returns False when no dotted hostname can be found in the URL.
    """
    hostname = urlparse(url).hostname
    if not hostname or '.' not in hostname:
        return False
    # keep only the last two labels, dropping subdomains
    return '.'.join(hostname.split('.')[-2:])
|
||||
|
||||
def email_escape_char(email_address):
    """ Escape problematic characters in the given email address string"""
    # backslash itself, then the SQL LIKE wildcards '%' and '_'
    translation = {'\\': '\\\\', '%': '\\%', '_': '\\_'}
    return ''.join(translation.get(char, char) for char in email_address)
|
||||
|
||||
# was mail_thread.decode_header()
def decode_message_header(message, header, separator=' '):
    """Join all non-falsy values of ``header`` in ``message`` with ``separator``."""
    values = [value for value in message.get_all(header, []) if value]
    return separator.join(values)
|
||||
|
||||
def formataddr(pair, charset='utf-8'):
    """Pretty format a 2-tuple of the form (realname, email_address).

    If the first element of pair is falsy then only the email address
    is returned.

    Set the charset to ascii to get a RFC-2822 compliant email. The
    realname will be base64 encoded (if necessary) and the domain part
    of the email will be punycode encoded (if necessary). The local part
    is left unchanged thus require the SMTPUTF8 extension when there are
    non-ascii characters.

    >>> formataddr(('John Doe', 'johndoe@example.com'))
    '"John Doe" <johndoe@example.com>'

    >>> formataddr(('', 'johndoe@example.com'))
    'johndoe@example.com'
    """
    name, address = pair
    local, _, domain = address.rpartition('@')

    try:
        domain.encode(charset)
    except UnicodeEncodeError:
        # rfc5890 - Internationalized Domain Names for Applications (IDNA)
        domain = idna.encode(domain).decode('ascii')

    if not name:
        return f"{local}@{domain}"

    try:
        name.encode(charset)
    except UnicodeEncodeError:
        # charset mismatch, encode as utf-8/base64
        # rfc2047 - MIME Message Header Extensions for Non-ASCII Text
        encoded_name = base64.b64encode(name.encode('utf-8')).decode('ascii')
        return f"=?utf-8?b?{encoded_name}?= <{local}@{domain}>"

    # ascii name, escape it if needed
    # rfc2822 - Internet Message Format
    # #section-3.4 - Address Specification
    escaped_name = email_addr_escapes_re.sub(r'\\\g<0>', name)
    return f'"{escaped_name}" <{local}@{domain}>'
|
||||
|
||||
def encapsulate_email(old_email, new_email):
    """Change the FROM of the message and use the old one as name.

    e.g.
    * Old From: "Admin" <admin@gmail.com>
    * New From: notifications@odoo.com
    * Output: "Admin" <notifications@odoo.com>
    """
    old_pairs = getaddresses([old_email])
    if not old_pairs or not old_pairs[0]:
        return old_email

    new_pairs = getaddresses([new_email])
    if not new_pairs or not new_pairs[0]:
        # unparsable new address: nothing sensible to return
        return

    old_name, old_address = old_pairs[0]
    # fall back on the old local part when no display name was set
    name_part = old_name or old_address.split("@")[0]

    return formataddr((name_part, new_pairs[0][1]))
|
||||
|
||||
def unfold_references(msg_references):
    """ As it declared in [RFC2822] long header bodies can be "folded" using
    CRLF+WSP. Some mail clients split References header body which contains
    Message Ids by "\n ". Return the list of unfolded Message Ids.

    RFC2882: https://tools.ietf.org/html/rfc2822#section-2.2.3 """
    references = mail_header_msgid_re.findall(msg_references)
    # "Unfold" buggy references by dropping any embedded whitespace
    return [re.sub(r'[\r\n\t ]+', '', reference) for reference in references]
|
||||
222
odoo-bringout-oca-ocb-base/odoo/tools/mimetypes.py
Normal file
222
odoo-bringout-oca-ocb-base/odoo/tools/mimetypes.py
Normal file
|
|
@ -0,0 +1,222 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Mimetypes-related utilities
|
||||
|
||||
# TODO: reexport stdlib mimetypes?
|
||||
"""
|
||||
import collections
|
||||
import functools
|
||||
import io
|
||||
import logging
|
||||
import mimetypes
|
||||
import re
|
||||
import zipfile
|
||||
|
||||
__all__ = ['guess_mimetype']
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
# We define our own guess_mimetype implementation and if magic is available we
|
||||
# use it instead.
|
||||
|
||||
# discriminants for zip-based file formats
|
||||
_ooxml_dirs = {
|
||||
'word/': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
|
||||
'pt/': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
|
||||
'xl/': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
|
||||
}
|
||||
def _check_ooxml(data):
|
||||
with io.BytesIO(data) as f, zipfile.ZipFile(f) as z:
|
||||
filenames = z.namelist()
|
||||
# OOXML documents should have a [Content_Types].xml file for early
|
||||
# check that we're interested in this thing at all
|
||||
if '[Content_Types].xml' not in filenames:
|
||||
return False
|
||||
|
||||
# then there is a directory whose name denotes the type of the file:
|
||||
# word, pt (powerpoint) or xl (excel)
|
||||
for dirname, mime in _ooxml_dirs.items():
|
||||
if any(entry.startswith(dirname) for entry in filenames):
|
||||
return mime
|
||||
|
||||
return False
|
||||
|
||||
|
||||
# checks that a string looks kinda sorta like a mimetype
|
||||
_mime_validator = re.compile(r"""
|
||||
[\w-]+ # type-name
|
||||
/ # subtype separator
|
||||
[\w-]+ # registration facet or subtype
|
||||
(?:\.[\w-]+)* # optional faceted name
|
||||
(?:\+[\w-]+)? # optional structured syntax specifier
|
||||
""", re.VERBOSE)
|
||||
def _check_open_container_format(data):
|
||||
# Open Document Format for Office Applications (OpenDocument) Version 1.2
|
||||
#
|
||||
# Part 3: Packages
|
||||
# 3 Packages
|
||||
# 3.3 MIME Media Type
|
||||
with io.BytesIO(data) as f, zipfile.ZipFile(f) as z:
|
||||
# If a MIME media type for a document exists, then an OpenDocument
|
||||
# package should contain a file with name "mimetype".
|
||||
if 'mimetype' not in z.namelist():
|
||||
return False
|
||||
|
||||
# The content of this file shall be the ASCII encoded MIME media type
|
||||
# associated with the document.
|
||||
marcel = z.read('mimetype').decode('ascii')
|
||||
# check that it's not too long (RFC6838 § 4.2 restricts type and
|
||||
# subtype to 127 characters each + separator, strongly recommends
|
||||
# limiting them to 64 but does not require it) and that it looks a lot
|
||||
# like a valid mime type
|
||||
if len(marcel) < 256 and _mime_validator.match(marcel):
|
||||
return marcel
|
||||
|
||||
return False
|
||||
|
||||
_xls_pattern = re.compile(b"""
|
||||
\x09\x08\x10\x00\x00\x06\x05\x00
|
||||
| \xFD\xFF\xFF\xFF(\x10|\x1F|\x20|"|\\#|\\(|\\))
|
||||
""", re.VERBOSE)
|
||||
_ppt_pattern = re.compile(b"""
|
||||
\x00\x6E\x1E\xF0
|
||||
| \x0F\x00\xE8\x03
|
||||
| \xA0\x46\x1D\xF0
|
||||
| \xFD\xFF\xFF\xFF(\x0E|\x1C|\x43)\x00\x00\x00
|
||||
""", re.VERBOSE)
|
||||
def _check_olecf(data):
|
||||
""" Pre-OOXML Office formats are OLE Compound Files which all use the same
|
||||
file signature ("magic bytes") and should have a subheader at offset 512
|
||||
(0x200).
|
||||
|
||||
Subheaders taken from http://www.garykessler.net/library/file_sigs.html
|
||||
according to which Mac office files *may* have different subheaders. We'll
|
||||
ignore that.
|
||||
"""
|
||||
offset = 0x200
|
||||
if data.startswith(b'\xEC\xA5\xC1\x00', offset):
|
||||
return 'application/msword'
|
||||
# the _xls_pattern stuff doesn't seem to work correctly (the test file
|
||||
# only has a bunch of \xf* at offset 0x200), that apparently works
|
||||
elif b'Microsoft Excel' in data:
|
||||
return 'application/vnd.ms-excel'
|
||||
elif _ppt_pattern.match(data, offset):
|
||||
return 'application/vnd.ms-powerpoint'
|
||||
return False
|
||||
|
||||
|
||||
def _check_svg(data):
|
||||
"""This simply checks the existence of the opening and ending SVG tags"""
|
||||
if b'<svg' in data and b'/svg' in data:
|
||||
return 'image/svg+xml'
|
||||
|
||||
|
||||
# for "master" formats with many subformats, discriminants is a list of
# functions, tried in order and the first non-falsy value returned is the
# selected mime type. If all functions return falsy values, the master
# mimetype is returned.
_Entry = collections.namedtuple('_Entry', ['mimetype', 'signatures', 'discriminants'])
# Ordered table scanned first-match-wins by _odoo_guess_mimetype: each entry
# pairs a master mimetype with its magic-byte signatures and optional
# sub-format discriminant functions.
_mime_mappings = (
    # pdf
    _Entry('application/pdf', [b'%PDF'], []),
    # jpg, jpeg, png, gif, bmp, jfif
    _Entry('image/jpeg', [b'\xFF\xD8\xFF\xE0', b'\xFF\xD8\xFF\xE2', b'\xFF\xD8\xFF\xE3', b'\xFF\xD8\xFF\xE1', b'\xFF\xD8\xFF\xDB'], []),
    _Entry('image/png', [b'\x89PNG\r\n\x1A\n'], []),
    _Entry('image/gif', [b'GIF87a', b'GIF89a'], []),
    _Entry('image/bmp', [b'BM'], []),
    # anything starting with "<" is assumed XML; SVG is the only XML
    # subformat discriminated here
    _Entry('application/xml', [b'<'], [
        _check_svg,
    ]),
    _Entry('image/x-icon', [b'\x00\x00\x01\x00'], []),
    # OLECF files in general (Word, Excel, PPT, default to word because why not?)
    _Entry('application/msword', [b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1', b'\x0D\x44\x4F\x43'], [
        _check_olecf
    ]),
    # zip, but will include jar, odt, ods, odp, docx, xlsx, pptx, apk
    _Entry('application/zip', [b'PK\x03\x04'], [_check_ooxml, _check_open_container_format]),
)
|
||||
def _odoo_guess_mimetype(bin_data, default='application/octet-stream'):
    """ Attempts to guess the mime type of the provided binary data, similar
    to but significantly more limited than libmagic

    :param bytes bin_data: binary data to try and guess a mime type for
    :param str default: mimetype returned when no signature matches
    :returns: matched mimetype or ``application/octet-stream`` if none matched
    """
    # by default, guess the type using the magic number of file hex signature
    # (like magic, but more limited)
    # see http://www.filesignatures.net/ for file signatures
    for entry in _mime_mappings:
        for signature in entry.signatures:
            if bin_data.startswith(signature):
                # a signature matched: refine with the sub-format checkers,
                # first non-falsy result wins
                for discriminant in entry.discriminants:
                    try:
                        guess = discriminant(bin_data)
                        if guess:
                            return guess
                    except Exception:
                        # log-and-next: a broken sub-checker must not prevent
                        # falling back to the master mimetype.
                        # Logger.warn is a deprecated alias; use warning().
                        _logger.getChild('guess_mimetype').warning(
                            "Sub-checker '%s' of type '%s' failed",
                            discriminant.__name__, entry.mimetype,
                            exc_info=True
                        )
                # if no discriminant or no discriminant matches, return
                # primary mime type
                return entry.mimetype
    return default
|
||||
|
||||
|
||||
# Optional dependency: a "magic" library gives much better detection than
# the signature-table fallback above.
try:
    import magic
except ImportError:
    magic = None

if magic:
    # There are 2 python libs named 'magic' with incompatible api.
    # magic from pypi https://pypi.python.org/pypi/python-magic/
    if hasattr(magic, 'from_buffer'):
        _guesser = functools.partial(magic.from_buffer, mime=True)
    # magic from file(1) https://packages.debian.org/squeeze/python-magic
    elif hasattr(magic, 'open'):
        ms = magic.open(magic.MAGIC_MIME_TYPE)
        ms.load()
        _guesser = ms.buffer

    def guess_mimetype(bin_data, default=None):
        # NOTE(review): `default` is accepted for signature parity with
        # _odoo_guess_mimetype but is unused on this code path.
        # Only a prefix of the data is needed for signature sniffing.
        mimetype = _guesser(bin_data[:1024])
        # upgrade incorrect mimetype to official one, fixed upstream
        # https://github.com/file/file/commit/1a08bb5c235700ba623ffa6f3c95938fe295b262
        if mimetype == 'image/svg':
            return 'image/svg+xml'
        return mimetype
else:
    # no magic library available: fall back to the limited built-in guesser
    guess_mimetype = _odoo_guess_mimetype
|
||||
|
||||
|
||||
def neuter_mimetype(mimetype, user):
    """Downgrade potentially active content types for non-system users.

    Mimetypes containing "ht" (html/xhtml), "xml" or "svg" can embed
    executable content; unless ``user`` passes ``_is_system()`` the declared
    type is replaced by ``text/plain``.  Otherwise the mimetype is returned
    unchanged.
    """
    is_risky = any(marker in mimetype for marker in ('ht', 'xml', 'svg'))
    if is_risky and not user._is_system():
        return 'text/plain'
    return mimetype
|
||||
|
||||
def get_extension(filename):
    """Return the lowercased extension of ``filename`` (with leading dot),
    or an empty string when no sensible extension can be determined.
    """
    # A file has no extension if it has no dot (ignoring the leading one
    # of hidden files) or that what follow the last dot is not a single
    # word, e.g. "Mr. Doe"
    _stem, dot, ext = filename.lstrip('.').rpartition('.')
    if not dot or not ext.isalnum():
        return ''

    # Assume all 4-chars extensions to be valid extensions even if it is
    # not known from the mimetypes database. In /etc/mime.types, only 7%
    # known extensions are longer.
    if len(ext) <= 4:
        return f'.{ext}'.lower()

    # Use the mimetype database to decide whether the long suffix is a real
    # extension. Note: mimetypes.guess_type() returns ``(type, encoding)``;
    # the original code unpacked the second item as ``guessed_ext`` and
    # returned it, which would have yielded an *encoding* name such as
    # 'gzip' instead of an extension. That branch was in practice dead
    # (all encoding suffixes are <= 4 chars and caught above), so it is
    # dropped here.
    guessed_mimetype, _guessed_encoding = mimetypes.guess_type(filename)
    if guessed_mimetype:
        return f'.{ext}'.lower()

    # Unknown extension.
    return ''
|
||||
1804
odoo-bringout-oca-ocb-base/odoo/tools/misc.py
Normal file
1804
odoo-bringout-oca-ocb-base/odoo/tools/misc.py
Normal file
File diff suppressed because it is too large
Load diff
1804
odoo-bringout-oca-ocb-base/odoo/tools/misc.py.backup
Normal file
1804
odoo-bringout-oca-ocb-base/odoo/tools/misc.py.backup
Normal file
File diff suppressed because it is too large
Load diff
710
odoo-bringout-oca-ocb-base/odoo/tools/num2words_patch.py
Normal file
710
odoo-bringout-oca-ocb-base/odoo/tools/num2words_patch.py
Normal file
|
|
@ -0,0 +1,710 @@
|
|||
import decimal
|
||||
import math
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from decimal import ROUND_HALF_UP, Decimal
|
||||
from math import floor
|
||||
|
||||
# The following section of the code is used to monkey patch
|
||||
# the Arabic class of num2words package as there are some problems
|
||||
# upgrading the package to the newer version that fixed the bugs
|
||||
# so a temporary fix was to patch the old version with the code
|
||||
# from the new version manually.
|
||||
# The code is taken from num2words package: https://github.com/savoirfairelinux/num2words
|
||||
|
||||
|
||||
# Grammatical forms (singular, dual, plural-of-paucity, accusative) for each
# currency unit and sub-unit, used by Num2Word_AR_Fixed to pick the correct
# Arabic noun after a number.
CURRENCY_SR = [("ريال", "ريالان", "ريالات", "ريالاً"),
               ("هللة", "هللتان", "هللات", "هللة")]
CURRENCY_EGP = [("جنيه", "جنيهان", "جنيهات", "جنيهاً"),
                ("قرش", "قرشان", "قروش", "قرش")]
CURRENCY_KWD = [("دينار", "ديناران", "دينارات", "ديناراً"),
                ("فلس", "فلسان", "فلس", "فلس")]

# Masculine cardinal words for 0..19 (index 0 is empty: zero is handled
# separately by the converter).
ARABIC_ONES = [
    "", "واحد", "اثنان", "ثلاثة", "أربعة", "خمسة", "ستة", "سبعة", "ثمانية",
    "تسعة",
    "عشرة", "أحد عشر", "اثنا عشر", "ثلاثة عشر", "أربعة عشر", "خمسة عشر",
    "ستة عشر", "سبعة عشر", "ثمانية عشر",
    "تسعة عشر"
]
|
||||
|
||||
|
||||
class Num2Word_Base:
    """Base number-to-words converter, vendored from the num2words package
    (https://github.com/savoirfairelinux/num2words) to patch its Arabic
    converter without upgrading the dependency.

    Subclasses provide the language-specific word tables
    (``high_numwords``/``mid_numwords``/``low_numwords``) and ``merge``.
    """
    # per-language currency word forms, filled in by subclasses
    CURRENCY_FORMS = {}
    CURRENCY_ADJECTIVES = {}

    def __init__(self):
        self.is_title = False
        self.precision = 2
        self.exclude_title = []
        self.negword = "(-) "
        self.pointword = "(.)"
        self.errmsg_nonnum = "type: %s not in [long, int, float]"
        self.errmsg_floatord = "Cannot treat float %s as ordinal."
        self.errmsg_negord = "Cannot treat negative num %s as ordinal."
        self.errmsg_toobig = "abs(%s) must be less than %s."

        self.setup()

        # uses cards
        if any(hasattr(self, field) for field in
               ['high_numwords', 'mid_numwords', 'low_numwords']):
            self.cards = OrderedDict()
            self.set_numwords()
            # largest representable value: 1000 x the highest card
            self.MAXVAL = 1000 * next(iter(self.cards.keys()))

    def set_numwords(self):
        # populate self.cards from the subclass word tables, highest first
        self.set_high_numwords(self.high_numwords)
        self.set_mid_numwords(self.mid_numwords)
        self.set_low_numwords(self.low_numwords)

    def set_high_numwords(self, *args):
        raise NotImplementedError

    def set_mid_numwords(self, mid):
        for key, val in mid:
            self.cards[key] = val

    def set_low_numwords(self, numwords):
        # numwords is ordered high-to-low: map word i to value len-1-i
        for word, n in zip(numwords, range(len(numwords) - 1, -1, -1)):
            self.cards[n] = word

    def splitnum(self, value):
        """Recursively split ``value`` into a nested list of (word, value)
        pairs according to the card table (largest card first)."""
        for elem in self.cards:
            if elem > value:
                continue

            out = []
            if value == 0:
                div, mod = 1, 0
            else:
                div, mod = divmod(value, elem)

            if div == 1:
                out.append((self.cards[1], 1))
            else:
                if div == value:  # The system tallies, eg Roman Numerals
                    return [(div * self.cards[elem], div * elem)]
                out.append(self.splitnum(div))

            out.append((self.cards[elem], elem))

            if mod:
                out.append(self.splitnum(mod))

            return out

    def parse_minus(self, num_str):
        """Detach minus and return it as symbol with new num_str."""
        if num_str.startswith('-'):
            # Extra spacing to compensate if there is no minus.
            return '%s ' % self.negword.strip(), num_str[1:]
        return '', num_str

    def str_to_number(self, value):
        return Decimal(value)

    def to_cardinal(self, value):
        """Return ``value`` spelled out as a cardinal number."""
        try:
            # non-integral or non-numeric values go through the float path
            assert int(value) == value
        except (ValueError, TypeError, AssertionError):
            return self.to_cardinal_float(value)

        out = ""
        if value < 0:
            value = abs(value)
            out = "%s " % self.negword.strip()

        if value >= self.MAXVAL:
            raise OverflowError(self.errmsg_toobig % (value, self.MAXVAL))

        val = self.splitnum(value)
        words, _ = self.clean(val)
        return self.title(out + words)

    def float2tuple(self, value):
        """Split a float into (integer part, fractional digits as int),
        updating ``self.precision`` to the number of decimal places."""
        pre = int(value)

        # Simple way of finding decimal places to update the precision
        self.precision = abs(Decimal(str(value)).as_tuple().exponent)

        post = abs(value - pre) * 10**self.precision
        if abs(round(post) - post) < 0.01:
            # We generally floor all values beyond our precision (rather than
            # rounding), but in cases where we have something like 1.239999999,
            # which is probably due to python's handling of floats, we actually
            # want to consider it as 1.24 instead of 1.23
            post = int(round(post))
        else:
            post = int(math.floor(post))

        return pre, post

    def to_cardinal_float(self, value):
        """Spell out a non-integral value: integer part, point word, then
        each fractional digit individually."""
        try:
            # NOTE(review): comparison result is discarded — only the
            # exceptions raised by float(value) matter here
            float(value) == value
        except (ValueError, TypeError, AssertionError, AttributeError):
            raise TypeError(self.errmsg_nonnum % value)

        pre, post = self.float2tuple(float(value))

        post = str(post)
        # left-pad with zeros so each decimal place is spoken
        post = '0' * (self.precision - len(post)) + post

        out = [self.to_cardinal(pre)]
        if self.precision:
            out.append(self.title(self.pointword))

        for i in range(self.precision):
            curr = int(post[i])
            out.append(to_s(self.to_cardinal(curr)))

        return " ".join(out)

    def merge(self, left, right):
        raise NotImplementedError

    def clean(self, val):
        """Collapse the nested (word, value) structure produced by
        ``splitnum`` into a single pair via repeated ``merge`` calls."""
        out = val
        while len(val) != 1:
            out = []
            left, right = val[:2]
            if isinstance(left, tuple) and isinstance(right, tuple):
                out.append(self.merge(left, right))
                if val[2:]:
                    out.append(val[2:])
            else:
                for elem in val:
                    if isinstance(elem, list):
                        if len(elem) == 1:
                            out.append(elem[0])
                        else:
                            out.append(self.clean(elem))
                    else:
                        out.append(elem)
            val = out
        return out[0]

    def title(self, value):
        # capitalize each word when is_title is set, except excluded words
        if self.is_title:
            out = []
            value = value.split()
            for word in value:
                if word in self.exclude_title:
                    out.append(word)
                else:
                    out.append(word[0].upper() + word[1:])
            value = " ".join(out)
        return value

    def verify_ordinal(self, value):
        # ordinals must be non-negative integers
        if not value == int(value):
            raise TypeError(self.errmsg_floatord % value)
        if not abs(value) == value:
            raise TypeError(self.errmsg_negord % value)

    def to_ordinal(self, value):
        return self.to_cardinal(value)

    def to_ordinal_num(self, value):
        return value

    # Trivial version
    def inflect(self, value, text):
        text = text.split("/")
        if value == 1:
            return text[0]
        return "".join(text)

    # //CHECK: generalise? Any others like pounds/shillings/pence?
    def to_splitnum(self, val, hightxt="", lowtxt="", jointxt="",
                    divisor=100, longval=True, cents=True):
        """Spell a composite value such as pounds/pence: split ``val`` into
        a high and a low part and join their cardinal renderings."""
        out = []

        if isinstance(val, float):
            high, low = self.float2tuple(val)
        else:
            try:
                high, low = val
            except TypeError:
                high, low = divmod(val, divisor)

        if high:
            hightxt = self.title(self.inflect(high, hightxt))
            out.append(self.to_cardinal(high))
            if low:
                if longval:
                    if hightxt:
                        out.append(hightxt)
                    if jointxt:
                        out.append(self.title(jointxt))
            elif hightxt:
                out.append(hightxt)

        if low:
            if cents:
                out.append(self.to_cardinal(low))
            else:
                out.append("%02d" % low)
            if lowtxt and longval:
                out.append(self.title(self.inflect(low, lowtxt)))

        return " ".join(out)

    def to_year(self, value, **kwargs):
        return self.to_cardinal(value)

    def pluralize(self, n, forms):
        """
        Should resolve gettext form:
        http://docs.translatehouse.org/projects/localization-guide/en/latest/l10n/pluralforms.html
        """
        raise NotImplementedError

    def _money_verbose(self, number, currency):
        return self.to_cardinal(number)

    def _cents_verbose(self, number, currency):
        return self.to_cardinal(number)

    def _cents_terse(self, number, currency):
        return "%02d" % number

    def to_currency(self, val, currency='EUR', cents=True, separator=',',
                    adjective=False):
        """
        Args:
            val: Numeric value
            currency (str): Currency code
            cents (bool): Verbose cents
            separator (str): Cent separator
            adjective (bool): Prefix currency name with adjective
        Returns:
            str: Formatted string

        """
        left, right, is_negative = parse_currency_parts(val)

        try:
            cr1, cr2 = self.CURRENCY_FORMS[currency]

        except KeyError:
            raise NotImplementedError(
                'Currency code "%s" not implemented for "%s"' %
                (currency, self.__class__.__name__))

        if adjective and currency in self.CURRENCY_ADJECTIVES:
            cr1 = prefix_currency(self.CURRENCY_ADJECTIVES[currency], cr1)

        minus_str = "%s " % self.negword.strip() if is_negative else ""
        money_str = self._money_verbose(left, currency)
        cents_str = self._cents_verbose(right, currency) \
            if cents else self._cents_terse(right, currency)

        return '%s%s %s%s %s %s' % (
            minus_str,
            money_str,
            self.pluralize(left, cr1),
            separator,
            cents_str,
            self.pluralize(right, cr2)
        )

    def setup(self):
        # hook for subclasses, called from __init__ before word tables
        pass
|
||||
|
||||
|
||||
class Num2Word_AR_Fixed(Num2Word_Base):
    """Arabic number-to-words converter, monkey-patch replacement for the
    buggy ``lang_AR`` class of the installed num2words version (code taken
    from a newer num2words release — see the module header comment)."""
    errmsg_toobig = "abs(%s) must be less than %s."
    MAXVAL = 10**51

    def __init__(self):
        super().__init__()

        self.number = 0
        self.arabicPrefixText = ""
        self.arabicSuffixText = ""
        self.integer_value = 0
        self._decimalValue = ""
        # number of sub-unit (cents) digits spoken
        self.partPrecision = 2
        self.currency_unit = CURRENCY_SR[0]
        self.currency_subunit = CURRENCY_SR[1]
        self.isCurrencyPartNameFeminine = True
        self.isCurrencyNameFeminine = False
        self.separator = 'و'

        self.arabicOnes = ARABIC_ONES
        # feminine cardinal words for 0..19
        self.arabicFeminineOnes = [
            "", "إحدى", "اثنتان", "ثلاث", "أربع", "خمس", "ست", "سبع", "ثمان",
            "تسع",
            "عشر", "إحدى عشرة", "اثنتا عشرة", "ثلاث عشرة", "أربع عشرة",
            "خمس عشرة", "ست عشرة", "سبع عشرة", "ثماني عشرة",
            "تسع عشرة"
        ]
        # ordinal words for 0..19
        self.arabicOrdinal = [
            "", "اول", "ثاني", "ثالث", "رابع", "خامس", "سادس", "سابع", "ثامن",
            "تاسع", "عاشر", "حادي عشر", "ثاني عشر", "ثالث عشر", "رابع عشر",
            "خامس عشر", "سادس عشر", "سابع عشر", "ثامن عشر", "تاسع عشر"
        ]
        # tens words for 20..90 (index 0 is twenty)
        self.arabicTens = [
            "عشرون", "ثلاثون", "أربعون", "خمسون", "ستون", "سبعون", "ثمانون",
            "تسعون"
        ]
        self.arabicHundreds = [
            "", "مائة", "مئتان", "ثلاثمائة", "أربعمائة", "خمسمائة", "ستمائة",
            "سبعمائة", "ثمانمائة", "تسعمائة"
        ]

        # dual forms used in construct state (before a following word)
        self.arabicAppendedTwos = [
            "مئتا", "ألفا", "مليونا", "مليارا", "تريليونا", "كوادريليونا",
            "كوينتليونا", "سكستيليونا", "سبتيليونا", "أوكتيليونا ",
            "نونيليونا", "ديسيليونا", "أندسيليونا", "دوديسيليونا",
            "تريديسيليونا", "كوادريسيليونا", "كوينتينيليونا"
        ]
        # free-standing dual forms per thousands-group level
        self.arabicTwos = [
            "مئتان", "ألفان", "مليونان", "ملياران", "تريليونان",
            "كوادريليونان", "كوينتليونان", "سكستيليونان", "سبتيليونان",
            "أوكتيليونان ", "نونيليونان ", "ديسيليونان", "أندسيليونان",
            "دوديسيليونان", "تريديسيليونان", "كوادريسيليونان", "كوينتينيليونان"
        ]
        # singular group names: hundred, thousand, million, ...
        self.arabicGroup = [
            "مائة", "ألف", "مليون", "مليار", "تريليون", "كوادريليون",
            "كوينتليون", "سكستيليون", "سبتيليون", "أوكتيليون", "نونيليون",
            "ديسيليون", "أندسيليون", "دوديسيليون", "تريديسيليون",
            "كوادريسيليون", "كوينتينيليون"
        ]
        # accusative group names (used after 11..99 and similar)
        self.arabicAppendedGroup = [
            "", "ألفاً", "مليوناً", "ملياراً", "تريليوناً", "كوادريليوناً",
            "كوينتليوناً", "سكستيليوناً", "سبتيليوناً", "أوكتيليوناً",
            "نونيليوناً", "ديسيليوناً", "أندسيليوناً", "دوديسيليوناً",
            "تريديسيليوناً", "كوادريسيليوناً", "كوينتينيليوناً"
        ]
        # plural group names (used after 3..10)
        self.arabicPluralGroups = [
            "", "آلاف", "ملايين", "مليارات", "تريليونات", "كوادريليونات",
            "كوينتليونات", "سكستيليونات", "سبتيليونات", "أوكتيليونات",
            "نونيليونات", "ديسيليونات", "أندسيليونات", "دوديسيليونات",
            "تريديسيليونات", "كوادريسيليونات", "كوينتينيليونات"
        ]
        # the tables must stay aligned: one form per group level
        assert len(self.arabicAppendedGroup) == len(self.arabicGroup)
        assert len(self.arabicPluralGroups) == len(self.arabicGroup)
        assert len(self.arabicAppendedTwos) == len(self.arabicTwos)

    def number_to_arabic(self, arabic_prefix_text, arabic_suffix_text):
        # store affixes and split self.number into integer/decimal parts
        self.arabicPrefixText = arabic_prefix_text
        self.arabicSuffixText = arabic_suffix_text
        self.extract_integer_and_decimal_parts()

    def extract_integer_and_decimal_parts(self):
        # self.number is a string produced by to_str()
        splits = re.split('\\.', str(self.number))

        self.integer_value = int(splits[0])
        if len(splits) > 1:
            self._decimalValue = int(self.decimal_value(splits[1]))
        else:
            self._decimalValue = 0

    def decimal_value(self, decimal_part):
        """Normalize the fractional digits string to exactly
        ``self.partPrecision`` digits (pad with zeros, then truncate)."""
        if self.partPrecision is not len(decimal_part):
            decimal_part_length = len(decimal_part)

            decimal_part_builder = decimal_part
            for _ in range(0, self.partPrecision - decimal_part_length):
                decimal_part_builder += '0'
            decimal_part = decimal_part_builder

            if len(decimal_part) <= self.partPrecision:
                dec = len(decimal_part)
            else:
                dec = self.partPrecision
            result = decimal_part[0: dec]
        else:
            result = decimal_part

        # The following is useless (never happens)
        # for i in range(len(result), self.partPrecision):
        #     result += '0'
        return result

    def digit_feminine_status(self, digit, group_level):
        """Pick the masculine or feminine word for ``digit`` depending on
        the group level and the currency gender flags."""
        if group_level == -1:
            # level -1 is the sub-unit (cents) group
            if self.isCurrencyPartNameFeminine:
                return self.arabicFeminineOnes[int(digit)]
            else:
                # Note: this never happens
                return self.arabicOnes[int(digit)]
        elif group_level == 0:
            if self.isCurrencyNameFeminine:
                return self.arabicFeminineOnes[int(digit)]
            else:
                return self.arabicOnes[int(digit)]
        else:
            return self.arabicOnes[int(digit)]

    def process_arabic_group(self, group_number, group_level,
                             remaining_number):
        """Spell one 3-digit group at the given thousands ``group_level``
        (-1 = sub-unit, 0 = units, 1 = thousands, ...)."""
        tens = Decimal(group_number) % Decimal(100)
        hundreds = Decimal(group_number) / Decimal(100)
        ret_val = ""

        if int(hundreds) > 0:
            # 200 alone uses the construct-state dual form
            if tens == 0 and int(hundreds) == 2:
                ret_val = f"{self.arabicAppendedTwos[0]}"
            else:
                ret_val = f"{self.arabicHundreds[int(hundreds)]}"
            if ret_val and tens != 0:
                ret_val += " و "

        if tens > 0:
            if tens < 20:
                # if int(group_level) >= len(self.arabicTwos):
                #     raise OverflowError(self.errmsg_toobig %
                #                         (self.number, self.MAXVAL))
                assert int(group_level) < len(self.arabicTwos)
                if tens == 2 and int(hundreds) == 0 and group_level > 0:
                    # a bare "2" at a group level: dual of the group name
                    power = int(math.log10(self.integer_value))
                    if self.integer_value > 10 and power % 3 == 0 and \
                            self.integer_value == 2 * (10 ** power):
                        ret_val = f"{self.arabicAppendedTwos[int(group_level)]}"
                    else:
                        ret_val = f"{self.arabicTwos[int(group_level)]}"
                else:
                    if tens == 1 and group_level > 0 and hundreds == 0:
                        # Note: this never happens
                        # (hundreds == 0 only if group_number is 0)
                        ret_val += ""
                    elif (tens == 1 or tens == 2) and (
                            group_level == 0 or group_level == -1) and \
                            hundreds == 0 and remaining_number == 0:
                        # Note: this never happens (idem)
                        ret_val += ""
                    elif tens == 1 and group_level > 0:
                        ret_val += self.arabicGroup[int(group_level)]
                    else:
                        ret_val += self.digit_feminine_status(int(tens),
                                                              group_level)
            else:
                # 20..99: ones word (if any) then the tens word
                ones = tens % 10
                tens = (tens / 10) - 2
                if ones > 0:
                    ret_val += self.digit_feminine_status(ones, group_level)
                if ret_val and ones != 0:
                    ret_val += " و "

                ret_val += self.arabicTens[int(tens)]

        return ret_val

    # We use this instead of built-in `abs` function,
    # because `abs` suffers from loss of precision for big numbers
    def absolute(self, number):
        return number if number >= 0 else -number

    # We use this instead of `"{:09d}".format(number)`,
    # because the string conversion suffers from loss of
    # precision for big numbers
    def to_str(self, number):
        integer = int(number)
        if integer == number:
            return str(integer)
        # keep 9 fractional digits, zero-padded
        decimal = round((number - integer) * 10**9)
        return f"{integer}.{decimal:09d}"

    def convert(self, value):
        # entry point: stringify, split, then render
        self.number = self.to_str(value)
        self.number_to_arabic(self.arabicPrefixText, self.arabicSuffixText)
        return self.convert_to_arabic()

    def convert_to_arabic(self):
        """Render ``self.number`` (already split into integer/decimal
        parts) into the final Arabic phrase including currency words."""
        temp_number = Decimal(self.number)

        if temp_number == Decimal(0):
            return "صفر"

        decimal_string = self.process_arabic_group(self._decimalValue,
                                                   -1,
                                                   Decimal(0))
        ret_val = ""
        group = 0

        # consume the integer part three digits at a time, lowest group first
        while temp_number > Decimal(0):

            temp_number_dec = Decimal(str(temp_number))
            try:
                number_to_process = int(temp_number_dec % Decimal(str(1000)))
            except decimal.InvalidOperation:
                # widen the Decimal context for very large inputs
                decimal.getcontext().prec = len(
                    temp_number_dec.as_tuple().digits
                )
                number_to_process = int(temp_number_dec % Decimal(str(1000)))

            temp_number = int(temp_number_dec / Decimal(1000))

            group_description = \
                self.process_arabic_group(number_to_process,
                                          group,
                                          Decimal(floor(temp_number)))
            if group_description:
                if group > 0:
                    if ret_val:
                        ret_val = f"و {ret_val}"
                    if number_to_process != 2 and number_to_process != 1:
                        # if group >= len(self.arabicGroup):
                        #     raise OverflowError(self.errmsg_toobig %
                        #                         (self.number, self.MAXVAL)
                        #                         )
                        assert group < len(self.arabicGroup)
                        if number_to_process % 100 != 1:
                            if 3 <= number_to_process <= 10:
                                ret_val = f"{self.arabicPluralGroups[group]} {ret_val}"
                            else:
                                if ret_val:
                                    ret_val = f"{self.arabicAppendedGroup[group]} {ret_val}"
                                else:
                                    ret_val = f"{self.arabicGroup[group]} {ret_val}"

                        else:
                            ret_val = f"{self.arabicGroup[group]} {ret_val}"

                ret_val = f"{group_description} {ret_val}"
            group += 1
        formatted_number = ""
        if self.arabicPrefixText:
            formatted_number += f"{self.arabicPrefixText} "
        formatted_number += ret_val
        if self.integer_value != 0:
            # append the currency unit form matching the last two digits
            remaining100 = int(self.integer_value % 100)

            if remaining100 == 0 or remaining100 == 1:
                formatted_number += self.currency_unit[0]
            elif remaining100 == 2:
                if self.integer_value == 2:
                    formatted_number += self.currency_unit[1]
                else:
                    formatted_number += self.currency_unit[0]
            elif 3 <= remaining100 <= 10:
                formatted_number += self.currency_unit[2]
            elif 11 <= remaining100 <= 99:
                formatted_number += self.currency_unit[3]
        if self._decimalValue != 0:
            formatted_number += f" {self.separator} "
            formatted_number += decimal_string

        if self._decimalValue != 0:
            # append the sub-unit form matching the last two decimal digits
            formatted_number += " "
            remaining100 = int(self._decimalValue % 100)

            if remaining100 == 0 or remaining100 == 1:
                formatted_number += self.currency_subunit[0]
            elif remaining100 == 2:
                formatted_number += self.currency_subunit[1]
            elif 3 <= remaining100 <= 10:
                formatted_number += self.currency_subunit[2]
            elif 11 <= remaining100 <= 99:
                formatted_number += self.currency_subunit[3]

        if self.arabicSuffixText:
            formatted_number += f" {self.arabicSuffixText}"

        return formatted_number

    def validate_number(self, number):
        if number >= self.MAXVAL:
            raise OverflowError(self.errmsg_toobig % (number, self.MAXVAL))
        return number

    def set_currency_prefer(self, currency):
        # select the unit/sub-unit word tables for the given currency code,
        # defaulting to Saudi riyal
        if currency == 'EGP':
            self.currency_unit = CURRENCY_EGP[0]
            self.currency_subunit = CURRENCY_EGP[1]
        elif currency == 'KWD':
            self.currency_unit = CURRENCY_KWD[0]
            self.currency_subunit = CURRENCY_KWD[1]
        else:
            self.currency_unit = CURRENCY_SR[0]
            self.currency_subunit = CURRENCY_SR[1]

    def to_currency(self, value, currency='SR', prefix='', suffix=''):
        self.set_currency_prefer(currency)
        self.isCurrencyNameFeminine = False
        self.separator = "و"
        self.arabicOnes = ARABIC_ONES
        self.arabicPrefixText = prefix
        self.arabicSuffixText = suffix
        return self.convert(value=value)

    def to_ordinal(self, number, prefix=''):
        if number <= 19:
            # small ordinals come straight from the lookup table
            return f"{self.arabicOrdinal[number]}"
        if number < 100:
            self.isCurrencyNameFeminine = True
        else:
            self.isCurrencyNameFeminine = False
        # render without currency words
        self.currency_subunit = ('', '', '', '')
        self.currency_unit = ('', '', '', '')
        self.arabicPrefixText = prefix
        self.arabicSuffixText = ""
        return f"{self.convert(self.absolute(number)).strip()}"

    def to_year(self, value):
        value = self.validate_number(value)
        return self.to_cardinal(value)

    def to_ordinal_num(self, value):
        return self.to_ordinal(value).strip()

    def to_cardinal(self, number):
        self.isCurrencyNameFeminine = False
        number = self.validate_number(number)
        minus = ''
        if number < 0:
            minus = 'سالب '
        # render without currency words
        self.separator = ','
        self.currency_subunit = ('', '', '', '')
        self.currency_unit = ('', '', '', '')
        self.arabicPrefixText = ""
        self.arabicSuffixText = ""
        self.arabicOnes = ARABIC_ONES
        return minus + self.convert(value=self.absolute(number)).strip()
|
||||
|
||||
|
||||
def parse_currency_parts(value, is_int_with_cents=True):
    """Split a monetary amount into ``(units, cents, is_negative)``.

    Integers are interpreted as a raw cent count when ``is_int_with_cents``
    is true, otherwise as whole units.  Any other value is converted through
    ``Decimal`` and rounded half-up to two decimal places.
    """
    if isinstance(value, int):
        negative = value < 0
        magnitude = abs(value)
        if is_int_with_cents:
            # assume cents if value is integer
            units, cents = divmod(magnitude, 100)
            return units, cents, negative
        return magnitude, 0, negative

    amount = Decimal(value).quantize(
        Decimal('.01'),
        rounding=ROUND_HALF_UP
    )
    negative = amount < 0
    amount = abs(amount)
    units, fraction = divmod(amount, 1)
    return int(units), int(fraction * 100), negative
|
||||
|
||||
|
||||
def prefix_currency(prefix, base):
    """Return ``base`` with ``prefix`` prepended (space-separated) to each
    currency form, preserving tuple shape."""
    return tuple(f"{prefix} {form}" for form in base)
|
||||
|
||||
|
||||
# Python 2/3 compatibility shim kept from upstream num2words:
# `basestring` only exists on Python 2; on Python 3 the NameError
# falls back to `str`.
try:
    strtype = basestring
except NameError:
    strtype = str
|
||||
|
||||
|
||||
def to_s(val):
    """Return ``val`` converted to a text string.

    Historical Python 2/3 shim: the original called ``unicode(val)`` and
    relied on catching the resulting ``NameError`` on Python 3 to fall
    back to ``str``.  This module is Python-3 only (it uses f-strings),
    so the per-call exception round-trip was pure overhead; the behavior
    on Python 3 is identical.
    """
    return str(val)
|
||||
115
odoo-bringout-oca-ocb-base/odoo/tools/osutil.py
Normal file
115
odoo-bringout-oca-ocb-base/odoo/tools/osutil.py
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
"""
|
||||
Some functions related to the os and os.path module
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import warnings
|
||||
import zipfile
|
||||
|
||||
from os.path import join as opj
|
||||
|
||||
|
||||
WINDOWS_RESERVED = re.compile(r'''
    ^
    # forbidden stems: reserved keywords
    # (fixed: the original used "(:?" — an *optional literal colon* —
    # instead of the non-capturing group "(?:", which also matched ":CON")
    (?:CON|PRN|AUX|NUL|COM[1-9]|LPT[1-9])
    # even with an extension this is recommended against
    (?:\..*)?
    $
    ''', flags=re.IGNORECASE | re.VERBOSE)

def clean_filename(name, replacement=''):
    """ Strips or replaces possibly problematic or annoying characters out of
    the input string, in order to make it a valid filename in most operating
    systems (including dropping reserved Windows filenames).

    If this results in an empty string, results in "Untitled" (localized).

    Allows:

    * any alphanumeric character (unicode)
    * underscore (_) as that's innocuous
    * dot (.) except in leading position to avoid creating dotfiles
    * dash (-) except in leading position to avoid annoyance / confusion with
      command options
    * brackets ([ and ]), while they correspond to shell *character class*
      they're a common way to mark / tag files especially on windows
    * parenthesis ("(" and ")"), a more natural though less common version of
      the former
    * space (" ")

    :param str name: file name to clean up
    :param str replacement:
        replacement string to use for sequences of problematic input, by default
        an empty string to remove them entirely, each contiguous sequence of
        problems is replaced by a single replacement
    :rtype: str
    """
    # Substitute first, then test the reserved-name pattern on the *result*:
    # checking before cleaning let substitution re-create a reserved stem
    # (e.g. "C*ON" -> "CON").
    cleaned = re.sub(r'[^\w_.()\[\] -]+', replacement, name).lstrip('.-')
    if not cleaned or WINDOWS_RESERVED.match(cleaned):
        return "Untitled"
    return cleaned
|
||||
|
||||
def listdir(dir, recursive=False):
    """Allow to recursively get the file listing following symlinks, returns
    paths relative to the provided `dir` except completely broken if the symlink
    it follows leaves `dir`...

    .. deprecated:: 16.0 use :func:`os.walk` or a recursive glob instead.

    :param dir: root directory to walk
    :param recursive: must be True (the non-recursive variant was removed)
    :returns: generator of file paths relative to ``dir``
    """
    assert recursive, "use `os.listdir` or `pathlib.Path.iterdir`"
    warnings.warn("Since 16.0, use os.walk or a recursive glob", DeprecationWarning, stacklevel=2)
    dir = os.path.normpath(dir)

    # This is a generator; the original also built an (always empty) `res`
    # list and `return`ed it, but a generator's return value is invisible to
    # normal iteration — that dead code is removed.
    for root, _, files in os.walk(dir, followlinks=True):
        r = os.path.relpath(root, dir)
        yield from (opj(r, f) for f in files)
|
||||
|
||||
def zip_dir(path, stream, include_dir=True, fnct_sort=None):      # TODO add ignore list
    """Zip the content of a directory into `stream`.

    Files with extension .pyc, .pyo, .swp and .DS_Store files are skipped.

    :param str path: directory to archive
    :param stream: writable binary stream (or file name) receiving the zip
    :param bool include_dir: when True, archive member names keep the last
        directory component of `path` as prefix; when False, names are
        relative to `path` itself
    :param fnct_sort: Function to be passed to "key" parameter of built-in
        python sorted() to provide flexibility of sorting files
        inside ZIP archive according to specific requirements.
    """
    path = os.path.normpath(path)
    # length of the prefix to strip from absolute file paths to build member names
    len_prefix = len(os.path.dirname(path)) if include_dir else len(path)
    if len_prefix:
        len_prefix += 1  # also strip the path separator

    # extensions (or dotfile names, via `ext or bname`) excluded from the archive
    excluded = frozenset(['.pyc', '.pyo', '.swp', '.DS_Store'])
    with zipfile.ZipFile(stream, 'w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zipf:
        for dirpath, dirnames, filenames in os.walk(path):
            filenames = sorted(filenames, key=fnct_sort)
            for fname in filenames:
                bname, ext = os.path.splitext(fname)
                # dotfiles like ".DS_Store" split as (name, ''): fall back to the name
                ext = ext or bname
                if ext not in excluded:
                    # fixed: use a dedicated local instead of rebinding the
                    # `path` parameter inside the loop (shadowing bug)
                    file_path = os.path.normpath(os.path.join(dirpath, fname))
                    if os.path.isfile(file_path):
                        zipf.write(file_path, file_path[len_prefix:])
|
||||
|
||||
|
||||
# Windows NT service detection: on non-Windows platforms the check is a
# constant False; on Windows it queries the Service Control Manager.
if os.name != 'nt':
    is_running_as_nt_service = lambda: False
else:
    import win32service as ws
    import win32serviceutil as wsu

    from contextlib import contextmanager
    from odoo.release import nt_service_name

    def is_running_as_nt_service():
        # Return True when this process was spawned by the registered Odoo NT
        # service (the service's PID is our parent PID), False otherwise.
        @contextmanager
        def close_srv(srv):
            # guarantee the SCM handle is released even on error
            try:
                yield srv
            finally:
                ws.CloseServiceHandle(srv)

        try:
            with close_srv(ws.OpenSCManager(None, None, ws.SC_MANAGER_ALL_ACCESS)) as hscm:
                with close_srv(wsu.SmartOpenService(hscm, nt_service_name, ws.SERVICE_ALL_ACCESS)) as hs:
                    info = ws.QueryServiceStatusEx(hs)
                    # the service's process must be our direct parent
                    return info['ProcessId'] == os.getppid()
        except Exception:
            # no SCM access, service not installed, etc.: not running as a service
            return False
|
||||
80
odoo-bringout-oca-ocb-base/odoo/tools/parse_version.py
Normal file
80
odoo-bringout-oca-ocb-base/odoo/tools/parse_version.py
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
## this functions are taken from the setuptools package (version 0.6c8)
|
||||
## http://peak.telecommunity.com/DevCenter/PkgResources#parsing-utilities
|
||||
|
||||
from __future__ import print_function
|
||||
import re
|
||||
|
||||
# Split a version string into numeric runs, alphabetic runs, dots and dashes.
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
# Normalisation table for special tokens; bound to `.get` so that any token
# absent from the table maps to itself.
replace = {'pre':'c', 'preview':'c','-':'final-','_':'final-','rc':'c','dev':'@','saas':'','~':''}.get


def _parse_version_parts(s):
    """Yield the normalised, lexicographically-sortable components of *s*."""
    for token in component_re.split(s):
        token = replace(token, token)
        if not token or token == '.':
            continue
        if token[:1] in '0123456789':
            # left-pad numbers so string order matches numeric order
            yield token.zfill(8)
        else:
            # '*' sorts before digits, so tagged parts come before numbers
            yield '*' + token

    yield '*final'  # guarantees alpha/beta/candidate sort before the release


def parse_version(s):
    """Convert a version string into a chronologically-sortable tuple key.

    A rough cross between distutils' StrictVersion and LooseVersion: strict
    versions keep their usual ordering, anything else is handled like a
    slightly smarter LooseVersion. Numeric parts are zero-padded to 8 digits
    so they compare numerically as strings; dots are dropped, dashes kept.
    Trailing zeros are suppressed ("2.4.0" == "2.4"); alphanumeric parts are
    lower-cased. "-" and alpha strings sorting after "final" act as patch
    levels ("2.4-1" > "2.4"), while strings sorting before "final" ("a",
    "b", "alpha", "beta", "rc", "pre", "preview", ...) mark pre-releases
    ("2.4a1" < "2.4"). A falsy input is treated as "0.1".

    :param s: version string (or None/empty, meaning "0.1")
    :return: tuple of strings usable as a sort key
    """
    key = []
    for piece in _parse_version_parts((s or '0.1').lower()):
        if piece.startswith('*'):
            # a pre-release tag cancels any '-' (patch-level) marker before it
            if piece < '*final':
                while key and key[-1] == '*final-':
                    key.pop()
            # trailing zeros in a numeric run are insignificant
            while key and key[-1] == '00000000':
                key.pop()
        key.append(piece)
    return tuple(key)
|
||||
|
||||
if __name__ == '__main__':
    # Self-test: every tuple below must already be in strictly increasing
    # version order, otherwise an AssertionError is raised.
    def chk(lst, verbose=False):
        keys = []
        for version in lst:
            key = parse_version(version)
            keys.append(key)
            if verbose:
                print(version, key)

        for lo, hi in zip(keys, keys[1:]):
            assert lo < hi, '%s < %s == %s' % (lo, hi, lo < hi)

    chk(('0', '4.2', '4.2.3.4', '5.0.0-alpha', '5.0.0-rc1', '5.0.0-rc1.1', '5.0.0_rc2', '5.0.0_rc3', '5.0.0'), False)
    chk(('5.0.0-0_rc3', '5.0.0-1dev', '5.0.0-1'), False)
|
||||
|
||||
456
odoo-bringout-oca-ocb-base/odoo/tools/pdf.py
Normal file
456
odoo-bringout-oca-ocb-base/odoo/tools/pdf.py
Normal file
|
|
@ -0,0 +1,456 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
import io
|
||||
import re
|
||||
|
||||
from datetime import datetime
|
||||
from hashlib import md5
|
||||
from logging import getLogger
|
||||
from zlib import compress, decompress
|
||||
from PIL import Image, PdfImagePlugin
|
||||
from reportlab.lib import colors
|
||||
from reportlab.lib.units import cm
|
||||
from reportlab.lib.utils import ImageReader
|
||||
from reportlab.pdfgen import canvas
|
||||
|
||||
# Compatibility shim: keep the legacy PdfFileReader/PdfFileWriter names
# working against both PyPDF2 < 2.0 and >= 2.0.
try:
    # class were renamed in PyPDF2 > 2.0
    # https://pypdf2.readthedocs.io/en/latest/user/migration-1-to-2.html#classes
    from PyPDF2 import PdfReader
    import PyPDF2
    # monkey patch to discard unused arguments as the old arguments were not discarded in the transitional class
    # https://pypdf2.readthedocs.io/en/2.0.0/_modules/PyPDF2/_reader.html#PdfReader
    class PdfFileReader(PdfReader):
        def __init__(self, *args, **kwargs):
            if "strict" not in kwargs and len(args) < 2:
                kwargs["strict"] = True  # maintain the default
            # drop keyword arguments the new class no longer accepts
            # (e.g. warndest, overwriteWarnings)
            kwargs = {k:v for k, v in kwargs.items() if k in ('strict', 'stream')}
            super().__init__(*args, **kwargs)

    PyPDF2.PdfFileReader = PdfFileReader
    from PyPDF2 import PdfFileWriter, PdfFileReader
    PdfFileWriter._addObject = PdfFileWriter._add_object
except ImportError:
    # PyPDF2 < 2.0: the legacy class names exist natively
    from PyPDF2 import PdfFileWriter, PdfFileReader
|
||||
|
||||
from PyPDF2.generic import DictionaryObject, NameObject, ArrayObject, DecodedStreamObject, NumberObject, createStringObject, ByteStringObject
|
||||
|
||||
try:
|
||||
from fontTools.ttLib import TTFont
|
||||
except ImportError:
|
||||
TTFont = None
|
||||
|
||||
from odoo.tools.misc import file_open
|
||||
|
||||
_logger = getLogger(__name__)
# PDF date string format, e.g. "D:20230101120000+00'00'" (offset hardcoded to +00'00')
DEFAULT_PDF_DATETIME_FORMAT = "D:%Y%m%d%H%M%S+00'00'"
# mimetype as usually received, e.g. "text/xml"
REGEX_SUBTYPE_UNFORMATED = re.compile(r'^\w+/[\w-]+$')
# mimetype escaped as a PDF name object, e.g. "/text#2Fxml"
REGEX_SUBTYPE_FORMATED = re.compile(r'^/\w+#2F[\w-]+$')


# Disable linter warning: this import is needed to make sure a PDF stream can be saved in Image.
PdfImagePlugin.__name__
|
||||
|
||||
# make sure values are unwrapped by calling the specialized __getitem__
|
||||
def _unwrapping_get(self, key, default=None):
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
return default
|
||||
|
||||
|
||||
DictionaryObject.get = _unwrapping_get  # route .get through __getitem__ so returned values are unwrapped
|
||||
|
||||
|
||||
class BrandedFileWriter(PdfFileWriter):
    """PDF writer that stamps every produced document with Odoo metadata."""
    def __init__(self):
        super().__init__()
        # brand the output: creator/producer metadata set to Odoo
        self.addMetadata({
            '/Creator': "Odoo",
            '/Producer': "Odoo",
        })


# shadow the imported name so every writer built in this module is branded
PdfFileWriter = BrandedFileWriter
|
||||
|
||||
|
||||
def merge_pdf(pdf_data):
    """Concatenate several PDF documents into a single one.

    Embedded attachments of the input documents are not merged.

    :param list pdf_data: the PDF documents, each one as a bytes string
    :return: the merged PDF as a bytes string
    """
    merged_writer = PdfFileWriter()
    for raw in pdf_data:
        source = PdfFileReader(io.BytesIO(raw), strict=False)
        for index in range(source.getNumPages()):
            merged_writer.addPage(source.getPage(index))
    with io.BytesIO() as out:
        merged_writer.write(out)
        return out.getvalue()
|
||||
|
||||
|
||||
def rotate_pdf(pdf):
    """Build a new PDF with every page rotated 90° clockwise.

    Embedded attachments are not copied to the new document.

    :param pdf: the PDF content (bytes) to rotate
    :return: the rotated PDF as bytes
    """
    rotated_writer = PdfFileWriter()
    source = PdfFileReader(io.BytesIO(pdf), strict=False)
    for index in range(source.getNumPages()):
        current_page = source.getPage(index)
        current_page.rotateClockwise(90)
        rotated_writer.addPage(current_page)
    with io.BytesIO() as out:
        rotated_writer.write(out)
        return out.getvalue()
|
||||
|
||||
|
||||
def to_pdf_stream(attachment) -> io.BytesIO:
    """Return the attachment's content as a PDF byte stream.

    PDF attachments are returned as-is; image attachments are converted to a
    PDF. Any other mimetype is logged and nothing is returned.
    """
    raw_stream = io.BytesIO(attachment.raw)
    mimetype = attachment.mimetype
    if mimetype == 'application/pdf':
        return raw_stream
    if mimetype.startswith('image'):
        converted = io.BytesIO()
        Image.open(raw_stream).convert("RGB").save(converted, format="pdf")
        return converted
    _logger.warning("mimetype (%s) not recognized for %s", mimetype, attachment)
|
||||
|
||||
|
||||
def add_banner(pdf_stream, text=None, logo=False, thickness=2 * cm):
    """ Add a banner on a PDF in the upper right corner, with Odoo's logo (optionally).

    :param pdf_stream (BytesIO): The PDF stream where the banner will be applied.
    :param text (str): The text to be displayed.
    :param logo (bool): Whether to display Odoo's logo in the banner.
    :param thickness (float): The thickness of the banner in pixels.
    :return (BytesIO): The modified PDF stream.
    """

    old_pdf = PdfFileReader(pdf_stream, strict=False, overwriteWarnings=False)
    packet = io.BytesIO()
    can = canvas.Canvas(packet)
    odoo_logo = Image.open(file_open('base/static/img/main_partner-image.png', mode='rb'))
    # semi-transparent purple (alpha 0.8)
    odoo_color = colors.Color(113 / 255, 75 / 255, 103 / 255, 0.8)

    # First pass: draw one overlay page per source page, containing only the banner.
    for p in range(old_pdf.getNumPages()):
        page = old_pdf.getPage(p)
        width = float(abs(page.mediaBox.getWidth()))
        height = float(abs(page.mediaBox.getHeight()))

        # put the origin in the top-right corner and tilt the axes by -45°
        # so the banner lies diagonally across the corner
        can.setPageSize((width, height))
        can.translate(width, height)
        can.rotate(-45)

        # Draw banner
        path = can.beginPath()
        path.moveTo(-width, -thickness)
        path.lineTo(-width, -2 * thickness)
        path.lineTo(width, -2 * thickness)
        path.lineTo(width, -thickness)
        can.setFillColor(odoo_color)
        can.drawPath(path, fill=1, stroke=False)

        # Insert text (and logo) inside the banner
        can.setFontSize(10)
        can.setFillColor(colors.white)
        can.drawRightString(0.75 * thickness, -1.45 * thickness, text)
        logo and can.drawImage(
            ImageReader(odoo_logo), 0.25 * thickness, -2.05 * thickness, 40, 40, mask='auto', preserveAspectRatio=True)

        can.showPage()

    can.save()

    # Merge the old pages with the watermark
    watermark_pdf = PdfFileReader(packet, overwriteWarnings=False)
    new_pdf = PdfFileWriter()
    for p in range(old_pdf.getNumPages()):
        new_page = old_pdf.getPage(p)
        # Remove annotations (if any), to prevent errors in PyPDF2
        if '/Annots' in new_page:
            del new_page['/Annots']
        new_page.mergePage(watermark_pdf.getPage(p))
        new_pdf.addPage(new_page)

    # Write the new pdf into a new output stream
    output = io.BytesIO()
    new_pdf.write(output)

    return output
|
||||
|
||||
|
||||
# by default PdfFileReader will overwrite warnings.showwarning which is what
# logging.captureWarnings does, meaning it essentially reverts captureWarnings
# every time it's called which is undesirable
# -> force overwriteWarnings=False for every instantiation, whatever the caller
# passed (warndest is discarded for the same reason)
old_init = PdfFileReader.__init__
PdfFileReader.__init__ = lambda self, stream, strict=True, warndest=None, overwriteWarnings=True: \
    old_init(self, stream=stream, strict=strict, warndest=None, overwriteWarnings=False)
|
||||
|
||||
class OdooPdfFileReader(PdfFileReader):
    # OVERRIDE of PdfFileReader to add the management of multiple embedded files.

    '''PdfFileReader able to enumerate the files embedded inside the PDF.
    :raises NotImplementedError: if document is encrypted and uses an unsupported encryption method.
    '''
    def getAttachments(self):
        # Generator of (filename, content_bytes) pairs, one per embedded file.
        # Yields nothing for a PDF without attachments or a malformed PDF.
        if self.isEncrypted:
            # If the PDF is owner-encrypted, try to unwrap it by giving it an empty user password.
            self.decrypt('')

        try:
            # the /Names array alternates [name1, filespec1, name2, filespec2, ...]
            file_path = self.trailer["/Root"].get("/Names", {}).get("/EmbeddedFiles", {}).get("/Names")

            if not file_path:
                return []
            for i in range(0, len(file_path), 2):
                attachment = file_path[i+1].getObject()
                yield (attachment["/F"], attachment["/EF"]["/F"].getObject().getData())
        except Exception:
            # malformed pdf (i.e. invalid xref page)
            return []
|
||||
|
||||
|
||||
class OdooPdfFileWriter(PdfFileWriter):
    """PdfFileWriter extended with attachment embedding and PDF/A support."""

    def __init__(self, *args, **kwargs):
        """
        Override of the init to initialise additional variables.
        :param pdf_content: if given, will initialise the reader with the pdf content.
        """
        super().__init__(*args, **kwargs)
        # source reader, set by cloneReaderDocumentRoot()
        self._reader = None
        # whether the document being written is (meant to be) PDF/A compliant
        self.is_pdfa = False
|
||||
|
||||
    def addAttachment(self, name, data, subtype=None):
        """
        Add an attachment to the pdf. Supports adding multiple attachment, while respecting PDF/A rules.
        :param name: The name of the attachement
        :param data: The data of the attachement
        :param subtype: The mime-type of the attachement. This is required by PDF/A, but not essential otherwise.
        It should take the form of "/xxx#2Fxxx". E.g. for "text/xml": "/text#2Fxml"
        """
        adapted_subtype = subtype
        if subtype:
            # If we receive the subtype in an 'unformated' (mimetype) format, we'll try to convert it to a pdf-valid one
            if REGEX_SUBTYPE_UNFORMATED.match(subtype):
                adapted_subtype = '/' + subtype.replace('/', '#2F')

            if not REGEX_SUBTYPE_FORMATED.match(adapted_subtype):
                # The subtype still does not match the correct format, so we will not add it to the document
                _logger.warning("Attempt to add an attachment with the incorrect subtype '%s'. The subtype will be ignored.", subtype)
                adapted_subtype = ''

        attachment = self._create_attachment_object({
            'filename': name,
            'content': data,
            'subtype': adapted_subtype,
        })
        # Register the filespec in the name tree: append to the existing
        # /Names array, or create the /Names -> /EmbeddedFiles structure.
        if self._root_object.get('/Names') and self._root_object['/Names'].get('/EmbeddedFiles'):
            names_array = self._root_object["/Names"]["/EmbeddedFiles"]["/Names"]
            names_array.extend([attachment.getObject()['/F'], attachment])
        else:
            names_array = ArrayObject()
            names_array.extend([attachment.getObject()['/F'], attachment])

            embedded_files_names_dictionary = DictionaryObject()
            embedded_files_names_dictionary.update({
                NameObject("/Names"): names_array
            })
            embedded_files_dictionary = DictionaryObject()
            embedded_files_dictionary.update({
                NameObject("/EmbeddedFiles"): embedded_files_names_dictionary
            })
            self._root_object.update({
                NameObject("/Names"): embedded_files_dictionary
            })

        # Also reference the filespec in the /AF (associated files) array,
        # as expected for PDF/A-3 embedded files.
        if self._root_object.get('/AF'):
            attachment_array = self._root_object['/AF']
            attachment_array.extend([attachment])
        else:
            # Create a new object containing an array referencing embedded file
            # And reference this array in the root catalogue
            attachment_array = self._addObject(ArrayObject([attachment]))
            self._root_object.update({
                NameObject("/AF"): attachment_array
            })
|
||||
|
||||
def embed_odoo_attachment(self, attachment, subtype=None):
|
||||
assert attachment, "embed_odoo_attachment cannot be called without attachment."
|
||||
self.addAttachment(attachment.name, attachment.raw, subtype=subtype or attachment.mimetype)
|
||||
|
||||
    def cloneReaderDocumentRoot(self, reader):
        """Clone the reader's document root, then carry over the original
        header (to keep PDF/A markers) and document ID.

        :param reader: the PdfFileReader whose document root is cloned
        """
        super().cloneReaderDocumentRoot(reader)
        self._reader = reader
        # Try to read the header coming in, and reuse it in our new PDF
        # This is done in order to allows modifying PDF/A files after creating them (as PyPDF does not read it)
        stream = reader.stream
        stream.seek(0)
        header = stream.readlines(9)
        # Should always be true, the first line of a pdf should have 9 bytes (%PDF-1.x plus a newline)
        if len(header) == 1:
            # If we found a header, set it back to the new pdf
            self._header = header[0]
            # Also check the second line. If it is PDF/A, it should be a line starting by % following by four bytes + \n
            second_line = stream.readlines(1)[0]
            if second_line.decode('latin-1')[0] == '%' and len(second_line) == 6:
                self._header += second_line
                self.is_pdfa = True
        # Look if we have an ID in the incoming stream and use it.
        pdf_id = reader.trailer.get('/ID', None)
        if pdf_id:
            self._ID = pdf_id
|
||||
|
||||
    def convert_to_pdfa(self):
        """
        Transform the opened PDF file into a PDF/A compliant file
        """
        # Set the PDF version to 1.7 (as PDF/A-3 is based on version 1.7) and make it PDF/A compliant.
        # See https://github.com/veraPDF/veraPDF-validation-profiles/wiki/PDFA-Parts-2-and-3-rules#rule-612-1

        # " The file header shall begin at byte zero and shall consist of "%PDF-1.n" followed by a single EOL marker,
        # where 'n' is a single digit number between 0 (30h) and 7 (37h) "
        # " The aforementioned EOL marker shall be immediately followed by a % (25h) character followed by at least four
        # bytes, each of whose encoded byte values shall have a decimal value greater than 127 "
        self._header = b"%PDF-1.7\n%\xFF\xFF\xFF\xFF"

        # Add a document ID to the trailer. This is only needed when using encryption with regular PDF, but is required
        # when using PDF/A
        pdf_id = ByteStringObject(md5(self._reader.stream.getvalue()).digest())
        # The first string is based on the content at the time of creating the file, while the second is based on the
        # content of the file when it was last updated. When creating a PDF, both are set to the same value.
        self._ID = ArrayObject((pdf_id, pdf_id))

        # Embed an sRGB ICC colour profile as the document's output intent,
        # compressed with FlateDecode.
        with file_open('tools/data/files/sRGB2014.icc', mode='rb') as icc_profile:
            icc_profile_file_data = compress(icc_profile.read())

        icc_profile_stream_obj = DecodedStreamObject()
        icc_profile_stream_obj.setData(icc_profile_file_data)
        icc_profile_stream_obj.update({
            NameObject("/Filter"): NameObject("/FlateDecode"),
            NameObject("/N"): NumberObject(3),
            NameObject("/Length"): NameObject(str(len(icc_profile_file_data))),
        })

        icc_profile_obj = self._addObject(icc_profile_stream_obj)

        output_intent_dict_obj = DictionaryObject()
        output_intent_dict_obj.update({
            NameObject("/S"): NameObject("/GTS_PDFA1"),
            NameObject("/OutputConditionIdentifier"): createStringObject("sRGB"),
            NameObject("/DestOutputProfile"): icc_profile_obj,
            NameObject("/Type"): NameObject("/OutputIntent"),
        })

        output_intent_obj = self._addObject(output_intent_dict_obj)
        self._root_object.update({
            NameObject("/OutputIntents"): ArrayObject([output_intent_obj]),
        })

        pages = self._root_object['/Pages']['/Kids']

        # PDF/A needs the glyphs width array embedded in the pdf to be consistent with the ones from the font file.
        # But it seems like it is not the case when exporting from wkhtmltopdf.
        if TTFont:
            fonts = {}
            # First browse through all the pages of the pdf file, to get a reference to all the fonts used in the PDF.
            for page in pages:
                for font in page.getObject()['/Resources']['/Font'].values():
                    for descendant in font.getObject()['/DescendantFonts']:
                        # keyed by object id to deduplicate fonts shared across pages
                        fonts[descendant.idnum] = descendant.getObject()

            # Then for each font, rewrite the width array with the information taken directly from the font file.
            # The new width are calculated such as width = round(1000 * font_glyph_width / font_units_per_em)
            # See: http://martin.hoppenheit.info/blog/2018/pdfa-validation-and-inconsistent-glyph-width-information/
            for font in fonts.values():
                font_file = font['/FontDescriptor']['/FontFile2']
                stream = io.BytesIO(decompress(font_file._data))
                ttfont = TTFont(stream)
                font_upm = ttfont['head'].unitsPerEm
                glyphs = ttfont.getGlyphSet()._hmtx.metrics
                glyph_widths = []
                for key, values in glyphs.items():
                    if key[:5] == 'glyph':
                        glyph_widths.append(NumberObject(round(1000.0 * values[0] / font_upm)))

                font[NameObject('/W')] = ArrayObject([NumberObject(1), ArrayObject(glyph_widths)])
                stream.close()
        else:
            _logger.warning('The fonttools package is not installed. Generated PDF may not be PDF/A compliant.')

        outlines = self._root_object['/Outlines'].getObject()
        outlines[NameObject('/Count')] = NumberObject(1)

        # Set odoo as producer
        self.addMetadata({
            '/Creator': "Odoo",
            '/Producer': "Odoo",
        })
        self.is_pdfa = True
|
||||
|
||||
    def add_file_metadata(self, metadata_content):
        """
        Set the XMP metadata of the pdf, wrapping it with the necessary XMP header/footer.
        These are required for a PDF/A file to be completely compliant. Ommiting them would result in validation errors.
        :param metadata_content: bytes of the metadata to add to the pdf.
        """
        # See https://wwwimages2.adobe.com/content/dam/acom/en/devnet/xmp/pdfs/XMP%20SDK%20Release%20cc-2016-08/XMPSpecificationPart1.pdf
        # Page 10/11
        header = b'<?xpacket begin="" id="W5M0MpCehiHzreSzNTczkc9d"?>'
        footer = b'<?xpacket end="w"?>'
        metadata = b'%s%s%s' % (header, metadata_content, footer)
        # uncompressed /Metadata stream, as required for XMP packets
        file_entry = DecodedStreamObject()
        file_entry.setData(metadata)
        file_entry.update({
            NameObject("/Type"): NameObject("/Metadata"),
            NameObject("/Subtype"): NameObject("/XML"),
            NameObject("/Length"): NameObject(str(len(metadata))),
        })

        # Add the new metadata to the pdf, then redirect the reference to refer to this new object.
        metadata_object = self._addObject(file_entry)
        self._root_object.update({NameObject("/Metadata"): metadata_object})
|
||||
|
||||
    def _create_attachment_object(self, attachment):
        ''' Create a PyPdf2.generic object representing an embedded file.

        :param attachment: A dictionary containing:
            * filename: The name of the file to embed (required)
            * content: The bytes of the file to embed (required)
            * subtype: The mime-type of the file to embed (optional)
            * description: a human-readable description (optional)
        :return: the indirect reference to the created /Filespec object
        '''
        # the embedded file stream itself, with checksum/date/size parameters
        file_entry = DecodedStreamObject()
        file_entry.setData(attachment['content'])
        file_entry.update({
            NameObject("/Type"): NameObject("/EmbeddedFile"),
            NameObject("/Params"):
                DictionaryObject({
                    NameObject('/CheckSum'): createStringObject(md5(attachment['content']).hexdigest()),
                    NameObject('/ModDate'): createStringObject(datetime.now().strftime(DEFAULT_PDF_DATETIME_FORMAT)),
                    NameObject('/Size'): NameObject(f"/{len(attachment['content'])}"),
                }),
        })
        if attachment.get('subtype'):
            file_entry.update({
                NameObject("/Subtype"): NameObject(attachment['subtype']),
            })
        file_entry_object = self._addObject(file_entry)
        filename_object = createStringObject(attachment['filename'])
        # the /Filespec dictionary linking the filename to the stream,
        # with /AFRelationship /Data as expected by PDF/A-3
        filespec_object = DictionaryObject({
            NameObject("/AFRelationship"): NameObject("/Data"),
            NameObject("/Type"): NameObject("/Filespec"),
            NameObject("/F"): filename_object,
            NameObject("/EF"):
                DictionaryObject({
                    NameObject("/F"): file_entry_object,
                    NameObject('/UF'): file_entry_object,
                }),
            NameObject("/UF"): filename_object,
        })
        if attachment.get('description'):
            filespec_object.update({NameObject("/Desc"): createStringObject(attachment['description'])})
        return self._addObject(filespec_object)
|
||||
181
odoo-bringout-oca-ocb-base/odoo/tools/populate.py
Normal file
181
odoo-bringout-oca-ocb-base/odoo/tools/populate.py
Normal file
|
|
@ -0,0 +1,181 @@
|
|||
import random
|
||||
from datetime import datetime
|
||||
from dateutil.relativedelta import relativedelta
|
||||
from odoo.tools import pycompat
|
||||
|
||||
|
||||
def Random(seed):
    """Build a deterministic pseudo-random generator for the given seed.

    Seeding explicitly uses version 2 so a given (string) seed always maps
    to the same generator state, whatever the interpreter defaults.
    """
    rng = random.Random()
    rng.seed(seed, version=2)
    return rng
|
||||
|
||||
|
||||
def format_str(val, counter, values):
    """Interpolate ``counter`` and ``values`` into ``val`` when it is a string.

    Non-string values are returned untouched.
    """
    if not isinstance(val, str):
        return val
    return val.format(counter=counter, values=values)
|
||||
|
||||
|
||||
def chain_factories(field_factories, model_name):
    """Compose the field factories into one values-dict generator.

    Starting from the root generator, each (field_name, factory) pair wraps
    the generator built so far, in order.
    """
    gen = root_factory()
    for field_name, factory in field_factories:
        gen = factory(gen, field_name, model_name)
    return gen
|
||||
|
||||
|
||||
def root_factory():
    """Seed generator yielding fresh values dicts carrying only ``__complete``.

    The very first dict is flagged incomplete; every subsequent one is
    complete. A brand-new dict is built per iteration so that downstream
    factories can mutate them independently.
    """
    complete = False
    while True:
        yield {'__complete': complete}
        complete = True
|
||||
|
||||
|
||||
def randomize(vals, weights=None, seed=False, formatter=format_str, counter_offset=0):
    """Factory choosing a pseudo-random value among ``vals`` for each record.

    :param list vals: candidate values
    :param list weights: optional probabilistic weights, parallel to ``vals``
    :param seed: optional seed overriding the default (derived from the
        model and field names)
    :param function formatter: (val, counter, values) --> formatted_value
    :param int counter_offset: offset added to the record counter before
        formatting
    :returns: function of the form (iterator, field_name, model_name) -> values
    :rtype: function (iterator, str, str) -> dict
    """
    def generate(iterator, field_name, model_name):
        rng = Random('%s+field+%s' % (model_name, seed or field_name))
        for index, values in enumerate(iterator):
            chosen = rng.choices(vals, weights)[0]
            values[field_name] = formatter(chosen, index + counter_offset, values)
            yield values
    return generate
|
||||
|
||||
|
||||
def cartesian(vals, weights=None, seed=False, formatter=format_str, then=None):
    """ Return a factory for an iterator of values dicts that combines all ``vals`` for
    the field with the other field values in input.

    :param list vals: list in which a value will be chosen, depending on `weights`
    :param list weights: list of probabilistic weights
    :param seed: optional initialization of the random number generator
    :param function formatter: (val, counter, values) --> formatted_value
    :param function then: if defined, factory used when vals has been consumed.
    :returns: function of the form (iterator, field_name, model_name) -> values
    :rtype: function (iterator, str, str) -> dict
    """
    def generate(iterator, field_name, model_name):
        counter = 0
        # expand each incomplete input dict into one dict per value in `vals`
        for values in iterator:
            if values['__complete']:
                break # will consume and lose an element, (complete so a filling element). If it is a problem, use peekable instead.
            for val in vals:
                yield {**values, field_name: formatter(val, counter, values)}
            counter += 1
        # once inputs are complete, fall back to `then` or a randomize factory
        factory = then or randomize(vals, weights, seed, formatter, counter)
        yield from factory(iterator, field_name, model_name)
    return generate
|
||||
|
||||
|
||||
def iterate(vals, weights=None, seed=False, formatter=format_str, then=None):
    """ Return a factory for an iterator of values dicts that picks a value among ``vals``
    for each input. Once all ``vals`` have been used once, resume as ``then`` or as a
    ``randomize`` generator.

    :param list vals: list in which a value will be chosen, depending on `weights`
    :param list weights: list of probabilistic weights
    :param seed: optional initialization of the random number generator
    :param function formatter: (val, counter, values) --> formatted_value
    :param function then: if defined, factory used when vals has been consumed.
    :returns: function of the form (iterator, field_name, model_name) -> values
    :rtype: function (iterator, str, str) -> dict
    """
    def generate(iterator, field_name, model_name):
        counter = 0
        # first phase: use each value of `vals` exactly once
        for val in vals: # iteratable order is important, shortest first
            values = next(iterator)
            values[field_name] = formatter(val, counter, values)
            # records built in this phase are considered hand-picked, not fillers
            values['__complete'] = False
            yield values
            counter += 1
        # second phase: delegate to `then` or a randomize factory
        factory = then or randomize(vals, weights, seed, formatter, counter)
        yield from factory(iterator, field_name, model_name)
    return generate
|
||||
|
||||
|
||||
def constant(val, formatter=format_str):
    """Factory assigning the same (formatted) value to the field of every
    input values dict.

    :returns: function of the form (iterator, field_name, model_name) -> values
    :rtype: function (iterator, str, str) -> dict
    """
    def generate(iterator, field_name, _):
        for index, values in enumerate(iterator):
            values[field_name] = formatter(val, index, values)
            yield values
    return generate
|
||||
|
||||
|
||||
def compute(function, seed=None):
    """Factory deriving the field value as ``function(values, counter, random)``.

    ``values`` is the dict of other field values, ``counter`` an increasing
    integer, and ``random`` a pseudo-random generator seeded from the model
    and field names (or ``seed``).

    :param callable function: (values, counter, random) --> field_values
    :param seed: optional initialization of the random number generator
    :returns: function of the form (iterator, field_name, model_name) -> values
    :rtype: function (iterator, str, str) -> dict
    """
    def generate(iterator, field_name, model_name):
        rng = Random('%s+field+%s' % (model_name, seed or field_name))
        for index, values in enumerate(iterator):
            values[field_name] = function(values=values, counter=index, random=rng)
            yield values
    return generate
|
||||
|
||||
def randint(a, b, seed=None):
    """ Return a factory for an iterator of values dicts that sets the field
    to a random integer between a and b included in each input dict.

    :param int a: minimal random value
    :param int b: maximal random value
    :param int seed: optional random generator seed
    :returns: function of the form (iterator, field_name, model_name) -> values
    :rtype: function (iterator, str, str) -> dict
    """
    def pick(random=None, **kwargs):
        return random.randint(a, b)
    return compute(pick, seed=seed)
|
||||
|
||||
def randfloat(a, b, seed=None):
    """ Return a factory for an iterator of values dicts that sets the field
    to a random float between a and b included in each input dict.

    :param a: minimal random value
    :param b: maximal random value
    :param seed: optional random generator seed
    """
    def pick(random=None, **kwargs):
        return random.uniform(a, b)
    return compute(pick, seed=seed)
|
||||
|
||||
def randdatetime(*, base_date=None, relative_before=None, relative_after=None, seed=None):
    """ Return a factory for an iterator of values dicts that sets the field
    to a random datetime between relative_before and relative_after, relatively to
    base_date.

    :param datetime base_date: override the default base date if needed.
    :param relativedelta|timedelta relative_after: range up which we can go after the
        base date. If not set, defaults to 0, i.e. only in the past of reference.
    :param relativedelta|timedelta relative_before: range up which we can go before the
        base date. If not set, defaults to 0, i.e. only in the future of reference.
    :param seed: optional random generator seed
    :return: iterator for random dates inside the defined range
    """
    base_date = base_date or datetime(2020, 1, 1)

    def _span_seconds(delta):
        # relativedelta has no total_seconds(); measure the span by applying
        # the delta to the base date and diffing
        return ((base_date + delta) - base_date).total_seconds() if delta else 0

    seconds_before = _span_seconds(relative_before)
    seconds_after = _span_seconds(relative_after)

    def pick(random=None, **kwargs):
        offset = random.randint(int(seconds_before), int(seconds_after))
        return base_date + relativedelta(seconds=offset)
    return compute(pick, seed=seed)
|
||||
736
odoo-bringout-oca-ocb-base/odoo/tools/profiler.py
Normal file
736
odoo-bringout-oca-ocb-base/odoo/tools/profiler.py
Normal file
|
|
@ -0,0 +1,736 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
from datetime import datetime
|
||||
import gc
|
||||
import json
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import threading
|
||||
import re
|
||||
import functools
|
||||
|
||||
from psycopg2 import sql
|
||||
|
||||
from odoo import tools
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
# ensure we have a non patched time for profiling times when using freezegun
|
||||
real_datetime_now = datetime.now
|
||||
real_time = time.time.__call__
|
||||
|
||||
def _format_frame(frame):
|
||||
code = frame.f_code
|
||||
return (code.co_filename, frame.f_lineno, code.co_name, '')
|
||||
|
||||
|
||||
def _format_stack(stack):
|
||||
return [list(frame) for frame in stack]
|
||||
|
||||
|
||||
def get_current_frame(thread=None):
    """Return the first execution frame of *thread* (or of the caller when no
    thread is given) that does not belong to this module's file."""
    if thread:
        # frame currently executing in another thread
        frame = sys._current_frames()[thread.ident]
    else:
        frame = sys._getframe()
    # skip frames that belong to the profiler itself
    while frame.f_code.co_filename == __file__:
        frame = frame.f_back
    return frame
|
||||
|
||||
|
||||
def _get_stack_trace(frame, limit_frame=None):
|
||||
stack = []
|
||||
while frame is not None and frame != limit_frame:
|
||||
stack.append(_format_frame(frame))
|
||||
frame = frame.f_back
|
||||
if frame is None and limit_frame:
|
||||
_logger.error("Limit frame was not found")
|
||||
return list(reversed(stack))
|
||||
|
||||
|
||||
def stack_size():
    """Return the depth of the current call stack, profiler frames excluded."""
    depth = 0
    frame = get_current_frame()
    while frame is not None:
        depth += 1
        frame = frame.f_back
    return depth
|
||||
|
||||
|
||||
def make_session(name=''):
    """Build a default profiling session identifier: timestamp plus *name*."""
    return '{:%Y-%m-%d %H:%M:%S} {}'.format(real_datetime_now(), name)
|
||||
|
||||
|
||||
def force_hook():
    """
    Force periodic profiling collectors to generate some stack trace. This is
    useful before long calls that do not release the GIL, so that the time
    spent in those calls is attributed to a specific stack trace, instead of
    some arbitrary former frame.
    """
    hooks = getattr(threading.current_thread(), 'profile_hooks', ())
    for hook in hooks:
        hook()
|
||||
|
||||
|
||||
class Collector:
    """
    Base class for objects that collect profiling data.

    A collector object is used by a profiler to collect profiling data, most
    likely a list of stack traces with time and some context information added
    by ExecutionContext decorator on current thread.

    This is a generic implementation of a basic collector, to be inherited.
    It defines default behaviors for creating an entry in the collector.
    """
    name = None  # symbolic name of the collector
    _registry = {}  # map collector names to their class

    @classmethod
    def __init_subclass__(cls):
        """ Auto-register every subclass under both its symbolic name (when
        set) and its class name, so ``Collector.make('sql')`` works. """
        if cls.name:
            cls._registry[cls.name] = cls
        cls._registry[cls.__name__] = cls

    @classmethod
    def make(cls, name, *args, **kwargs):
        """ Instantiate a collector corresponding to the given name. """
        return cls._registry[name](*args, **kwargs)

    def __init__(self):
        self._processed = False  # whether post_process() has already run
        self._entries = []       # raw collected entries (dicts)
        self.profiler = None     # back-reference, set by Profiler.__init__

    def start(self):
        """ Start the collector. """

    def stop(self):
        """ Stop the collector. """

    def add(self, entry=None, frame=None):
        """ Add an entry (dict) to this collector.

        Every entry carries the stack trace, the thread's current execution
        context and a timestamp; *entry* keys override those defaults.
        """
        self._entries.append({
            'stack': self._get_stack_trace(frame),
            'exec_context': getattr(self.profiler.init_thread, 'exec_context', ()),
            'start': real_time(),
            **(entry or {}),
        })

    def progress(self, entry=None, frame=None):
        """ Checks if the limits were met and add to the entries"""
        # NOTE: the limit is checked *before* adding, and the entry is still
        # recorded even when the check ends the profiler
        if self.profiler.entry_count_limit \
                and self.profiler.entry_count() >= self.profiler.entry_count_limit:
            self.profiler.end()

        self.add(entry=entry, frame=frame)

    def _get_stack_trace(self, frame=None):
        """ Return the stack trace to be included in a given entry. """
        frame = frame or get_current_frame(self.profiler.init_thread)
        return _get_stack_trace(frame, self.profiler.init_frame)

    def post_process(self):
        """ Enrich collected stacks with their source lines (done once,
        lazily, when ``entries`` is first accessed). """
        for entry in self._entries:
            stack = entry.get('stack', [])
            self.profiler._add_file_lines(stack)

    @property
    def entries(self):
        """ Return the entries of the collector after postprocessing. """
        if not self._processed:
            self.post_process()
            self._processed = True
        return self._entries
|
||||
|
||||
|
||||
class SQLCollector(Collector):
    """
    Saves all executed queries in the current thread with the call stack.
    """
    name = 'sql'

    def start(self):
        """Register this collector's hook on the profiled thread."""
        thread = self.profiler.init_thread
        if not hasattr(thread, 'query_hooks'):
            thread.query_hooks = []
        thread.query_hooks.append(self.hook)

    def stop(self):
        """Unregister the hook from the profiled thread."""
        self.profiler.init_thread.query_hooks.remove(self.hook)

    def hook(self, cr, query, params, query_start, query_time):
        """Called by the cursor after each query execution."""
        entry = {
            'query': str(query),
            'full_query': str(cr._format(query, params)),
            'start': query_start,
            'time': query_time,
        }
        self.progress(entry)
|
||||
|
||||
|
||||
class PeriodicCollector(Collector):
    """
    Record execution frames asynchronously at most every `interval` seconds.

    :param interval (float): time to wait in seconds between two samples.
    """
    name = 'traces_async'

    def __init__(self, interval=0.01):  # check duration. dynamic?
        super().__init__()
        self.active = False               # sampling loop keeps running while True
        self.frame_interval = interval    # seconds between two samples
        self.__thread = threading.Thread(target=self.run)
        self.last_frame = None            # last sampled frame, for duplicate detection

    def run(self):
        """ Sampling loop, executed in a dedicated thread until stop(). """
        self.active = True
        last_time = real_time()
        while self.active:  # maybe add a check on parent_thread state?
            duration = real_time() - last_time
            if duration > self.frame_interval * 10 and self.last_frame:
                # The profiler has unexpectedly slept for more than 10 frame intervals. This may
                # happen when calling a C library without releasing the GIL. In that case, the
                # last frame was taken before the call, and the next frame is after the call, and
                # the call itself does not appear in any of those frames: the duration of the call
                # is incorrectly attributed to the last frame.
                self._entries[-1]['stack'].append(('profiling', 0, '⚠ Profiler freezed for %s s' % duration, ''))
                self.last_frame = None  # skip duplicate detection for the next frame.
            self.progress()
            last_time = real_time()
            time.sleep(self.frame_interval)

        self._entries.append({'stack': [], 'start': real_time()})  # add final end frame

    def start(self):
        """ Configure the sampling interval and start the sampler thread. """
        interval = self.profiler.params.get('traces_async_interval')
        if interval:
            # clamp user-provided interval to [1ms, 1s]
            self.frame_interval = min(max(float(interval), 0.001), 1)

        init_thread = self.profiler.init_thread
        if not hasattr(init_thread, 'profile_hooks'):
            init_thread.profile_hooks = []
        # allow force_hook() to trigger an immediate sample
        init_thread.profile_hooks.append(self.progress)

        self.__thread.start()

    def stop(self):
        """ Stop the sampling loop and wait for the sampler thread to end. """
        self.active = False
        self.__thread.join()
        self.profiler.init_thread.profile_hooks.remove(self.progress)

    def add(self, entry=None, frame=None):
        """ Add an entry (dict) to this collector. """
        frame = frame or get_current_frame(self.profiler.init_thread)
        if frame == self.last_frame:
            # don't save if the frame is exactly the same as the previous one.
            # maybe modify the last entry to add a last seen?
            return
        self.last_frame = frame
        super().add(entry=entry, frame=frame)
|
||||
|
||||
|
||||
class SyncCollector(Collector):
    """
    Record complete execution synchronously.
    Note that --limit-memory-hard may need to be increased when launching Odoo.
    """
    name = 'traces_sync'

    def start(self):
        """ Install the trace hook; warns if another trace function is set. """
        if sys.gettrace() is not None:
            _logger.error("Cannot start SyncCollector, settrace already set: %s", sys.gettrace())
        assert not self._processed, "You cannot start SyncCollector after accessing entries."
        sys.settrace(self.hook)  # todo test setprofile, but maybe not multithread safe

    def stop(self):
        """ Remove the trace hook. """
        sys.settrace(None)

    def hook(self, _frame, event, _arg=None):
        """ Trace function: record one entry per call/return event
        ('line' events are skipped to limit volume). """
        if event == 'line':
            return
        entry = {'event': event, 'frame': _format_frame(_frame)}
        if event == 'call' and _frame.f_back:
            # we need the parent frame to determine the line number of the call
            entry['parent_frame'] = _format_frame(_frame.f_back)
        self.progress(entry, frame=_frame)
        return self.hook

    def _get_stack_trace(self, frame=None):
        # Getting the full stack trace is slow, and not useful in this case.
        # SyncCollector only saves the top frame and event at each call and
        # recomputes the complete stack at the end.
        return None

    def post_process(self):
        # Transform the evented traces to full stack traces. This processing
        # could be avoided since speedscope will transform that back to
        # evented anyway, but it is actually simpler to integrate into the
        # current speedscope logic, especially when mixed with SQLCollector.
        # We could improve it by saving as evented and manage it later.
        stack = []
        for entry in self._entries:
            frame = entry.pop('frame')
            event = entry.pop('event')
            if event == 'call':
                if stack:
                    # refine the caller frame with the line number of the call
                    stack[-1] = entry.pop('parent_frame')
                stack.append(frame)
            elif event == 'return':
                stack.pop()
            entry['stack'] = stack[:]
        super().post_process()
|
||||
|
||||
|
||||
class QwebTracker():
    """ Helper used to instrument qweb rendering/compilation for profiling:
    the ``wrap_*`` classmethods decorate the qweb engine's methods, and an
    instance records directive enter/leave events for the current render. """

    @classmethod
    def wrap_render(cls, method_render):
        """ Wrap the qweb ``_render`` method to enable profiled compilation
        when profiling hooks or the qweb execution context are active. """
        @functools.wraps(method_render)
        def _tracked_method_render(self, template, values=None, **options):
            current_thread = threading.current_thread()
            execution_context_enabled = getattr(current_thread, 'profiler_params', {}).get('execution_context_qweb')
            qweb_hooks = getattr(current_thread, 'qweb_hooks', ())
            if execution_context_enabled or qweb_hooks:
                # To have the new compilation cached because the generated code will change.
                # Therefore 'profile' is a key to the cache.
                options['profile'] = True
            return method_render(self, template, values, **options)
        return _tracked_method_render

    @classmethod
    def wrap_compile(cls, method_compile):
        """ Wrap the qweb ``_compile`` method so that the compiled render
        function creates a QwebTracker and runs under it. """
        @functools.wraps(method_compile)
        def _tracked_compile(self, template):
            if not self.env.context.get('profile'):
                return method_compile(self, template)

            template_functions, def_name = method_compile(self, template)
            render_template = template_functions[def_name]

            def profiled_method_compile(self, values):
                options = template_functions['options']
                ref = options.get('ref')
                ref_xml = options.get('ref_xml')
                qweb_tracker = QwebTracker(ref, ref_xml, self.env.cr)
                self = self.with_context(qweb_tracker=qweb_tracker)
                if qweb_tracker.execution_context_enabled:
                    with ExecutionContext(template=ref):
                        return render_template(self, values)
                return render_template(self, values)
            template_functions[def_name] = profiled_method_compile

            return (template_functions, def_name)
        return _tracked_compile

    @classmethod
    def wrap_compile_directive(cls, method_compile_directive):
        """ Wrap the qweb ``_compile_directive`` method to emit
        enter/leave tracking statements around each compiled directive. """
        @functools.wraps(method_compile_directive)
        def _tracked_compile_directive(self, el, options, directive, level):
            if not options.get('profile') or directive in ('inner-content', 'tag-open', 'tag-close'):
                return method_compile_directive(self, el, options, directive, level)
            # generated source lines are indented to match `level`
            enter = f"{' ' * 4 * level}self.env.context['qweb_tracker'].enter_directive({directive!r}, {el.attrib!r}, {options['_qweb_error_path_xml'][0]!r})"
            leave = f"{' ' * 4 * level}self.env.context['qweb_tracker'].leave_directive({directive!r}, {el.attrib!r}, {options['_qweb_error_path_xml'][0]!r})"
            code_directive = method_compile_directive(self, el, options, directive, level)
            return [enter, *code_directive, leave] if code_directive else []
        return _tracked_compile_directive

    def __init__(self, view_id, arch, cr):
        current_thread = threading.current_thread()  # don't store current_thread on self
        self.execution_context_enabled = getattr(current_thread, 'profiler_params', {}).get('execution_context_qweb')
        self.qweb_hooks = getattr(current_thread, 'qweb_hooks', ())
        self.context_stack = []  # opened ExecutionContext objects, one per entered directive
        self.cr = cr
        self.view_id = view_id
        for hook in self.qweb_hooks:
            hook('render', self.cr.sql_log_count, view_id=view_id, arch=arch)

    def enter_directive(self, directive, attrib, xpath):
        """ Record the start of a directive: open an ExecutionContext
        describing it (when enabled) and notify the qweb hooks. """
        execution_context = None
        if self.execution_context_enabled:
            directive_info = {}
            if ('t-' + directive) in attrib:
                directive_info['t-' + directive] = repr(attrib['t-' + directive])
            if directive == 'set':
                if 't-value' in attrib:
                    directive_info['t-value'] = repr(attrib['t-value'])
                if 't-valuef' in attrib:
                    directive_info['t-valuef'] = repr(attrib['t-valuef'])

                for key in attrib:
                    if key.startswith('t-set-') or key.startswith('t-setf-'):
                        directive_info[key] = repr(attrib[key])
            elif directive == 'foreach':
                directive_info['t-as'] = repr(attrib['t-as'])
            elif directive == 'groups' and 'groups' in attrib and not directive_info.get('t-groups'):
                directive_info['t-groups'] = repr(attrib['groups'])
            elif directive == 'att':
                for key in attrib:
                    if key.startswith('t-att-') or key.startswith('t-attf-'):
                        directive_info[key] = repr(attrib[key])
            elif directive == 'options':
                for key in attrib:
                    if key.startswith('t-options-'):
                        directive_info[key] = repr(attrib[key])
            elif ('t-' + directive) not in attrib:
                directive_info['t-' + directive] = None

            execution_context = tools.profiler.ExecutionContext(**directive_info, xpath=xpath)
            execution_context.__enter__()
            self.context_stack.append(execution_context)

        for hook in self.qweb_hooks:
            hook('enter', self.cr.sql_log_count, view_id=self.view_id, xpath=xpath, directive=directive, attrib=attrib)

    def leave_directive(self, directive, attrib, xpath):
        """ Record the end of a directive: close the matching
        ExecutionContext (when enabled) and notify the qweb hooks. """
        if self.execution_context_enabled:
            self.context_stack.pop().__exit__()

        for hook in self.qweb_hooks:
            hook('leave', self.cr.sql_log_count, view_id=self.view_id, xpath=xpath, directive=directive, attrib=attrib)
|
||||
|
||||
|
||||
class QwebCollector(Collector):
    """
    Record qweb execution with directive trace.
    """
    name = 'qweb'

    def __init__(self):
        super().__init__()
        # raw (event, kwargs, sql_log_count, timestamp) tuples, folded into
        # structured results by post_process()
        self.events = []

        def hook(event, sql_log_count, **kwargs):
            self.events.append((event, kwargs, sql_log_count, real_time()))
        self.hook = hook

    def _get_directive_profiling_name(self, directive, attrib):
        """ Build a human-readable label for a directive from its attributes;
        an empty string means the directive is not worth displaying. """
        expr = ''
        if directive == 'set':
            if 't-set' in attrib:
                expr = f"t-set={repr(attrib['t-set'])}"
            if 't-value' in attrib:
                expr += f" t-value={repr(attrib['t-value'])}"
            if 't-valuef' in attrib:
                expr += f" t-valuef={repr(attrib['t-valuef'])}"
            for key in attrib:
                if key.startswith('t-set-') or key.startswith('t-setf-'):
                    if expr:
                        expr += ' '
                    expr += f"{key}={repr(attrib[key])}"
        elif directive == 'foreach':
            expr = f"t-foreach={repr(attrib['t-foreach'])} t-as={repr(attrib['t-as'])}"
        elif directive == 'options':
            if attrib.get('t-options'):
                expr = f"t-options={repr(attrib['t-options'])}"
            for key in attrib:
                if key.startswith('t-options-'):
                    expr = f"{expr} {key}={repr(attrib[key])}"
        elif directive == 'att':
            for key in attrib:
                if key == 't-att' or key.startswith('t-att-') or key.startswith('t-attf-'):
                    if expr:
                        expr += ' '
                    expr += f"{key}={repr(attrib[key])}"
        elif ('t-' + directive) in attrib:
            expr = f"t-{directive}={repr(attrib['t-' + directive])}"
        else:
            expr = f"t-{directive}"

        return expr

    def start(self):
        """ Register the event hook on the profiled thread. """
        init_thread = self.profiler.init_thread
        if not hasattr(init_thread, 'qweb_hooks'):
            init_thread.qweb_hooks = []
        init_thread.qweb_hooks.append(self.hook)

    def stop(self):
        """ Unregister the event hook. """
        self.profiler.init_thread.qweb_hooks.remove(self.hook)

    def post_process(self):
        """ Replay the recorded events and accumulate per-directive elapsed
        time and query counts. """
        last_event_query = None
        last_event_time = None
        stack = []
        results = []
        archs = {}
        for event, kwargs, sql_count, time in self.events:
            if event == 'render':
                archs[kwargs['view_id']] = kwargs['arch']
                continue

            # update the active directive with the elapsed time and queries
            if stack:
                top = stack[-1]
                top['delay'] += time - last_event_time
                top['query'] += sql_count - last_event_query
            last_event_time = time
            last_event_query = sql_count

            directive = self._get_directive_profiling_name(kwargs['directive'], kwargs['attrib'])
            if directive:
                if event == 'enter':
                    data = {
                        'view_id': kwargs['view_id'],
                        'xpath': kwargs['xpath'],
                        'directive': directive,
                        'delay': 0,
                        'query': 0,
                    }
                    results.append(data)
                    stack.append(data)
                else:
                    assert event == "leave"
                    data = stack.pop()

        self.add({'results': {'archs': archs, 'data': results}})
        super().post_process()
|
||||
|
||||
|
||||
class ExecutionContext:
    """
    Attach contextual information to the current thread at the current
    call-stack depth. Collectors store this context next to each captured
    stack, and Speedscope uses it to add an extra level to the stack.
    """
    def __init__(self, **context):
        self.context = context
        self.previous_context = None

    def __enter__(self):
        thread = threading.current_thread()
        self.previous_context = getattr(thread, 'exec_context', ())
        # stack the new context on top of what is already recorded, tagged
        # with the current stack depth
        thread.exec_context = self.previous_context + ((stack_size(), self.context),)

    def __exit__(self, *_args):
        # restore whatever context was active before entering
        threading.current_thread().exec_context = self.previous_context
|
||||
|
||||
|
||||
class Profiler:
    """
    Context manager to use to start the recording of some execution.
    Will save sql and async stack trace by default.
    """
    def __init__(self, collectors=None, db=..., profile_session=None,
                 description=None, disable_gc=False, params=None):
        """
        :param db: database name to use to save results.
            Will try to define database automatically by default.
            Use value ``None`` to not save results in a database.
        :param collectors: list of string and Collector object Ex: ['sql', PeriodicCollector(interval=0.2)]. Use `None` for default collectors
        :param profile_session: session description to use to regroup multiple profiles. use make_session(name) for default format.
        :param description: description of the current profiler Suggestion: (route name/test method/loading module, ...)
        :param disable_gc: flag to disable gc during profiling (useful to avoid gc while profiling, especially during sql execution)
        :param params: parameters usable by collectors (like frame interval)
        """
        self.start_time = 0
        self.duration = 0
        self.profile_session = profile_session or make_session()
        self.description = description
        self.init_frame = None          # frame where the profiler was entered
        self.init_stack_trace = None    # stack trace at __enter__ time
        self.init_thread = None         # thread that entered the profiler
        self.disable_gc = disable_gc
        self.filecache = {}             # filename -> list of source lines (or None)
        self.params = params or {}  # custom parameters usable by collectors
        self.profile_id = None
        self.entry_count_limit = int(self.params.get("entry_count_limit", 0))  # the limit could be set using a smarter way
        self.done = False

        if db is ...:
            # determine database from current thread
            db = getattr(threading.current_thread(), 'dbname', None)
            if not db:
                # only raise if path is not given and db is not explicitly disabled
                raise Exception('Database name cannot be defined automaticaly. \n Please provide a valid/falsy dbname or path parameter')
        self.db = db

        # collectors
        if collectors is None:
            collectors = ['sql', 'traces_async']
        self.collectors = []
        for collector in collectors:
            if isinstance(collector, str):
                try:
                    collector = Collector.make(collector)
                except Exception:
                    _logger.error("Could not create collector with name %r", collector)
                    continue
            collector.profiler = self
            self.collectors.append(collector)

    def __enter__(self):
        self.init_thread = threading.current_thread()
        try:
            self.init_frame = get_current_frame(self.init_thread)
            self.init_stack_trace = _get_stack_trace(self.init_frame)
        except KeyError:
            # when using thread pools (gevent) the thread won't exist in the current_frames
            # this case is managed by http.py but will still fail when adding a profiler
            # inside a piece of code that may be called by a longpolling route.
            # in this case, avoid crashing the caller and disable all collectors
            self.init_frame = self.init_stack_trace = self.collectors = []
            self.db = self.params = None
            message = "Cannot start profiler, thread not found. Is the thread part of a thread pool?"
            if not self.description:
                self.description = message
            _logger.warning(message)

        if self.description is None:
            # default description: "<function> (<file>:<line>)" of the entry point
            frame = self.init_frame
            code = frame.f_code
            self.description = f"{frame.f_code.co_name} ({code.co_filename}:{frame.f_lineno})"
        if self.params:
            self.init_thread.profiler_params = self.params
        if self.disable_gc and gc.isenabled():
            gc.disable()
        self.start_time = real_time()
        for collector in self.collectors:
            collector.start()
        return self

    def __exit__(self, *args):
        self.end()

    def end(self):
        """ Stop the collectors and, when a database is configured, persist
        the results into ir_profile. Idempotent. """
        if self.done:
            return
        self.done = True
        try:
            for collector in self.collectors:
                collector.stop()
            self.duration = real_time() - self.start_time
            self._add_file_lines(self.init_stack_trace)

            if self.db:
                # pylint: disable=import-outside-toplevel
                from odoo.sql_db import db_connect  # only import from odoo if/when needed.
                with db_connect(self.db).cursor() as cr:
                    values = {
                        "name": self.description,
                        "session": self.profile_session,
                        "create_date": real_datetime_now(),
                        "init_stack_trace": json.dumps(_format_stack(self.init_stack_trace)),
                        "duration": self.duration,
                        "entry_count": self.entry_count(),
                        "sql_count": sum(len(collector.entries) for collector in self.collectors if collector.name == 'sql')
                    }
                    for collector in self.collectors:
                        if collector.entries:
                            values[collector.name] = json.dumps(collector.entries)
                    # identifiers are quoted via psycopg2.sql to keep the
                    # insert injection-safe
                    query = sql.SQL("INSERT INTO {}({}) VALUES %s RETURNING id").format(
                        sql.Identifier("ir_profile"),
                        sql.SQL(",").join(map(sql.Identifier, values)),
                    )
                    cr.execute(query, [tuple(values.values())])
                    self.profile_id = cr.fetchone()[0]
                    _logger.info('ir_profile %s (%s) created', self.profile_id, self.profile_session)
        finally:
            if self.disable_gc:
                gc.enable()
            if self.params:
                del self.init_thread.profiler_params

    def _get_cm_proxy(self):
        """ Return a nested-context-manager proxy for this profiler. """
        return _Nested(self)

    def _add_file_lines(self, stack):
        """ Replace the empty 'line' element of each stack tuple with the
        actual source line, using a per-profiler file cache. """
        for index, frame in enumerate(stack):
            (filename, lineno, name, line) = frame
            if line != '':
                continue
            # retrieve file lines from the filecache
            if not lineno:
                continue
            try:
                filelines = self.filecache[filename]
            except KeyError:
                try:
                    with tools.file_open(filename, filter_ext=('.py',)) as f:
                        filelines = f.readlines()
                except (ValueError, FileNotFoundError):  # mainly for <decorator> "filename"
                    filelines = None
                self.filecache[filename] = filelines
            # fill in the line
            if filelines is not None:
                line = filelines[lineno - 1]
                stack[index] = (filename, lineno, name, line)

    def entry_count(self):
        """ Return the total number of entries collected in this profiler. """
        return sum(len(collector.entries) for collector in self.collectors)

    def format_path(self, path):
        """
        Utility function to format a path for this profiler.
        This is mainly useful to uniquify a path between executions.
        """
        return path.format(
            time=real_datetime_now().strftime("%Y%m%d-%H%M%S"),
            len=self.entry_count(),
            desc=re.sub("[^0-9a-zA-Z-]+", "_", self.description)
        )

    def json(self):
        """
        Utility function to generate a json version of this profiler.
        This is useful to write profiling entries into a file, such as::

            with Profiler(db=None) as profiler:
                do_stuff()

            filename = p.format_path('/home/foo/{desc}_{len}.json')
            with open(filename, 'w') as f:
                f.write(profiler.json())
        """
        return json.dumps({
            "name": self.description,
            "session": self.profile_session,
            "create_date": real_datetime_now().strftime("%Y%m%d-%H%M%S"),
            "init_stack_trace": _format_stack(self.init_stack_trace),
            "duration": self.duration,
            "collectors": {collector.name: collector.entries for collector in self.collectors},
        }, indent=4)
|
||||
|
||||
|
||||
class _Nested:
|
||||
__slots__ = ("__profiler",)
|
||||
|
||||
def __init__(self, profiler):
|
||||
self.__profiler = profiler
|
||||
|
||||
def __enter__(self):
|
||||
self.__profiler.__enter__()
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
return self.__profiler.__exit__(*args)
|
||||
|
||||
|
||||
class Nested:
    """
    Utility to nest another context manager inside a profiler.

    The profiler should only be called directly in the "with" without nesting it
    with ExitStack. If not, the retrieval of the 'init_frame' may be incorrect
    and lead to an error "Limit frame was not found" when profiling. Since the
    stack will ignore all stack frames inside this file, the nested frames will
    be ignored, too. This is also why Nested() does not use
    contextlib.contextmanager.
    """
    def __init__(self, profiler, context_manager):
        self.profiler = profiler
        self.context_manager = context_manager

    def __enter__(self):
        # the profiler is entered first, then the nested context manager,
        # whose value is what the `with` statement receives
        self.profiler.__enter__()
        return self.context_manager.__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        # the nested manager exits first; the profiler always exits, even if
        # the nested __exit__ raises
        try:
            return self.context_manager.__exit__(exc_type, exc_value, traceback)
        finally:
            self.profiler.__exit__(exc_type, exc_value, traceback)
|
||||
41
odoo-bringout-oca-ocb-base/odoo/tools/pycompat.py
Normal file
41
odoo-bringout-oca-ocb-base/odoo/tools/pycompat.py
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
#pylint: disable=deprecated-module
|
||||
import csv
|
||||
import codecs
|
||||
import io
|
||||
|
||||
_reader = codecs.getreader('utf-8')
|
||||
_writer = codecs.getwriter('utf-8')
|
||||
|
||||
|
||||
def csv_reader(stream, **params):
    """Return a ``csv.reader`` over a UTF-8 *bytes* stream.

    :param stream: a binary file-like object (NOT a text stream)
    :param params: extra keyword arguments forwarded to :func:`csv.reader`
    """
    assert not isinstance(stream, io.TextIOBase),\
        "For cross-compatibility purposes, csv_reader takes a bytes stream"
    decoded = codecs.getreader('utf-8')(stream)
    return csv.reader(decoded, **params)
|
||||
|
||||
|
||||
def csv_writer(stream, **params):
    """Return a ``csv.writer`` encoding its output as UTF-8 into a *bytes* stream.

    :param stream: a binary file-like object (NOT a text stream)
    :param params: extra keyword arguments forwarded to :func:`csv.writer`
    """
    assert not isinstance(stream, io.TextIOBase), \
        "For cross-compatibility purposes, csv_writer takes a bytes stream"
    encoded = codecs.getwriter('utf-8')(stream)
    return csv.writer(encoded, **params)
|
||||
|
||||
|
||||
def to_text(source):
    """ Generates a str value from an arbitrary source.

    * False and None are converted to empty strings
    * text is passed through
    * bytes are decoded as UTF-8
    * anything else is stringified with ``str()``
    """
    if source is False or source is None:
        return ''
    if isinstance(source, str):
        return source
    if isinstance(source, bytes):
        return source.decode('utf-8')
    return str(source)
|
||||
248
odoo-bringout-oca-ocb-base/odoo/tools/query.py
Normal file
248
odoo-bringout-oca-ocb-base/odoo/tools/query.py
Normal file
|
|
@ -0,0 +1,248 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
import re
|
||||
import warnings
|
||||
from zlib import crc32
|
||||
|
||||
from .func import lazy_property
|
||||
|
||||
# Valid unquoted SQL identifier: a letter or underscore followed by
# alphanumerics, '_' or '$' (case-insensitive).
IDENT_RE = re.compile(r'^[a-z_][a-z0-9_$]*$', re.I)


def _from_table(table, alias):
    """ Return a FROM clause element from ``table`` and ``alias``. """
    if alias == table:
        # no aliasing needed, quote the name directly
        return f'"{alias}"'
    if IDENT_RE.match(table):
        # plain table name aliased to something else
        return f'"{table}" AS "{alias}"'
    # ``table`` is a sub-query expression: parenthesize it
    return f'({table}) AS "{alias}"'
|
||||
|
||||
|
||||
def _generate_table_alias(src_table_alias, link):
    """ Generate a standard table alias name. An alias is generated as following:

    - the base is the source table name (that can already be an alias)
    - then, the joined table is added in the alias using a 'link field name'
      that is used to render unique aliases for a given path
    - the name is shortcut if it goes beyond PostgreSQL's identifier limits

    .. code-block:: pycon

        >>> _generate_table_alias('res_users', link='parent_id')
        'res_users__parent_id'

    :param str src_table_alias: alias of the source table
    :param str link: field name
    :return str: alias
    """
    alias = f"{src_table_alias}__{link}"
    if len(alias) < 64:
        return alias
    # PostgreSQL truncates identifiers at 63 characters: keep a readable
    # 54-character prefix and disambiguate with an 8-hex-digit crc32 of
    # the full alias (plus one separating underscore).
    return "%s_%08x" % (alias[:54], crc32(alias.encode('utf-8')))
|
||||
|
||||
|
||||
class Query(object):
    """ Simple implementation of a query object, managing tables with aliases,
    join clauses (with aliases, condition and parameters), where clauses (with
    parameters), order, limit and offset.

    :param cr: database cursor (for lazy evaluation)
    :param alias: name or alias of the table
    :param table: if given, a table expression (identifier or query)
    """

    def __init__(self, cr, alias, table=None):
        # database cursor, used only when the query is actually executed
        # (see the lazy ``_result`` property)
        self._cr = cr

        # tables {alias: table}
        self._tables = {alias: table or alias}

        # joins {alias: (kind, table, condition, condition_params)}
        self._joins = {}

        # holds the list of WHERE clause elements (to be joined with 'AND'), and
        # the list of parameters
        self._where_clauses = []
        self._where_params = []

        # order, limit, offset
        self.order = None
        self.limit = None
        self.offset = None

    def add_table(self, alias, table=None):
        """ Add a table with a given alias to the from clause. """
        assert alias not in self._tables and alias not in self._joins, "Alias %r already in %s" % (alias, str(self))
        self._tables[alias] = table or alias

    def add_where(self, where_clause, where_params=()):
        """ Add a condition to the where clause. """
        self._where_clauses.append(where_clause)
        self._where_params.extend(where_params)

    def join(self, lhs_alias, lhs_column, rhs_table, rhs_column, link, extra=None, extra_params=()):
        """
        Perform a join between a table already present in the current Query object and
        another table.

        :param str lhs_alias: alias of a table already defined in the current Query object.
        :param str lhs_column: column of `lhs_alias` to be used for the join's ON condition.
        :param str rhs_table: name of the table to join to `lhs_alias`.
        :param str rhs_column: column of `rhs_alias` to be used for the join's ON condition.
        :param str link: used to generate the alias for the joined table, this string should
            represent the relationship (the link) between both tables.
        :param str extra: an sql string of a predicate or series of predicates to append to the
            join's ON condition, `lhs_alias` and `rhs_alias` can be injected if the string uses
            the `lhs` and `rhs` variables with the `str.format` syntax. e.g.::

                query.join(..., extra="{lhs}.name != {rhs}.name OR ...", ...)

        :param tuple extra_params: a tuple of values to be interpolated into `extra`, this is
            done by psycopg2.

        Full example:

        >>> rhs_alias = query.join(
        ...     "res_users",
        ...     "partner_id",
        ...     "res_partner",
        ...     "id",
        ...     "partner_id",  # partner_id is the "link" from res_users to res_partner
        ...     "{lhs}.\"name\" != %s",
        ...     ("Mitchell Admin",),
        ... )
        >>> rhs_alias
        res_users_res_partner__partner_id

        From the example above, the resulting query would be something like::

            SELECT ...
            FROM "res_users" AS "res_users"
            JOIN "res_partner" AS "res_users_res_partner__partner_id"
                ON "res_users"."partner_id" = "res_users_res_partner__partner_id"."id"
                AND "res_users"."name" != 'Mitchell Admin'
            WHERE ...

        """
        return self._join('JOIN', lhs_alias, lhs_column, rhs_table, rhs_column, link, extra, extra_params)

    def left_join(self, lhs_alias, lhs_column, rhs_table, rhs_column, link, extra=None, extra_params=()):
        """ Add a LEFT JOIN to the current table (if necessary), and return the
        alias corresponding to ``rhs_table``.

        See the documentation of :meth:`join` for a better overview of the
        arguments and what they do.
        """
        return self._join('LEFT JOIN', lhs_alias, lhs_column, rhs_table, rhs_column, link, extra, extra_params)

    def _join(self, kind, lhs_alias, lhs_column, rhs_table, rhs_column, link, extra=None, extra_params=()):
        # Shared implementation of join()/left_join()/add_join(); with an
        # empty ``kind`` (deprecated implicit join) the join is expressed as
        # an extra FROM table plus a WHERE condition instead of a JOIN.
        assert lhs_alias in self._tables or lhs_alias in self._joins, "Alias %r not in %s" % (lhs_alias, str(self))

        rhs_alias = _generate_table_alias(lhs_alias, link)
        assert rhs_alias not in self._tables, "Alias %r already in %s" % (rhs_alias, str(self))

        if rhs_alias not in self._joins:
            condition = f'"{lhs_alias}"."{lhs_column}" = "{rhs_alias}"."{rhs_column}"'
            condition_params = []
            if extra:
                condition = condition + " AND " + extra.format(lhs=lhs_alias, rhs=rhs_alias)
                condition_params = list(extra_params)
            if kind:
                self._joins[rhs_alias] = (kind, rhs_table, condition, condition_params)
            else:
                self._tables[rhs_alias] = rhs_table
                self.add_where(condition, condition_params)

        return rhs_alias

    def select(self, *args):
        """ Return the SELECT query as a pair ``(query_string, query_params)``. """
        from_clause, where_clause, params = self.get_sql()
        # default projection: the id column of the first (main) table
        query_str = 'SELECT {} FROM {} WHERE {}{}{}{}'.format(
            ", ".join(args or [f'"{next(iter(self._tables))}".id']),
            from_clause,
            where_clause or "TRUE",
            (" ORDER BY %s" % self.order) if self.order else "",
            (" LIMIT %d" % self.limit) if self.limit else "",
            (" OFFSET %d" % self.offset) if self.offset else "",
        )
        return query_str, params

    def subselect(self, *args):
        """ Similar to :meth:`.select`, but for sub-queries.
        This one avoids the ORDER BY clause when possible.
        """
        if self.limit or self.offset:
            # in this case, the ORDER BY clause is necessary
            return self.select(*args)

        from_clause, where_clause, params = self.get_sql()
        query_str = 'SELECT {} FROM {} WHERE {}'.format(
            ", ".join(args or [f'"{next(iter(self._tables))}".id']),
            from_clause,
            where_clause or "TRUE",
        )
        return query_str, params

    def get_sql(self):
        """ Returns (query_from, query_where, query_params). """
        tables = [_from_table(table, alias) for alias, table in self._tables.items()]
        joins = []
        params = []
        for alias, (kind, table, condition, condition_params) in self._joins.items():
            joins.append(f'{kind} {_from_table(table, alias)} ON ({condition})')
            params.extend(condition_params)

        from_clause = " ".join([", ".join(tables)] + joins)
        where_clause = " AND ".join(self._where_clauses)
        return from_clause, where_clause, params + self._where_params

    @lazy_property
    def _result(self):
        # execute the query once and cache the resulting first-column values
        query_str, params = self.select()
        self._cr.execute(query_str, params)
        return [row[0] for row in self._cr.fetchall()]

    def __str__(self):
        return '<osv.Query: %r with params: %r>' % self.select()

    def __bool__(self):
        return bool(self._result)

    def __len__(self):
        return len(self._result)

    def __iter__(self):
        return iter(self._result)

    #
    # deprecated attributes and methods
    #
    @property
    def tables(self):
        warnings.warn("deprecated Query.tables, use Query.get_sql() instead",
                      DeprecationWarning)
        return tuple(_from_table(table, alias) for alias, table in self._tables.items())

    @property
    def where_clause(self):
        # deprecated: raw access to the WHERE clause elements
        return tuple(self._where_clauses)

    @property
    def where_clause_params(self):
        # deprecated: raw access to the WHERE clause parameters
        return tuple(self._where_params)

    def add_join(self, connection, implicit=True, outer=False, extra=None, extra_params=()):
        warnings.warn("deprecated Query.add_join, use Query.join() or Query.left_join() instead",
                      DeprecationWarning)
        lhs_alias, rhs_table, lhs_column, rhs_column, link = connection
        kind = '' if implicit else ('LEFT JOIN' if outer else 'JOIN')
        rhs_alias = self._join(kind, lhs_alias, lhs_column, rhs_table, rhs_column, link, extra, extra_params)
        return rhs_alias, _from_table(rhs_table, rhs_alias)
|
||||
75
odoo-bringout-oca-ocb-base/odoo/tools/rendering_tools.py
Normal file
75
odoo-bringout-oca-ocb-base/odoo/tools/rendering_tools.py
Normal file
|
|
@ -0,0 +1,75 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
import dateutil.relativedelta as relativedelta
|
||||
import functools
|
||||
import re
|
||||
|
||||
from markupsafe import Markup
|
||||
from werkzeug import urls
|
||||
|
||||
from odoo.tools import safe_eval
|
||||
|
||||
# Matches "{{ expr }}" placeholders in inline templates; group 1 captures
# the (non-greedy) expression between the braces.
INLINE_TEMPLATE_REGEX = re.compile(r"\{\{(.+?)\}\}")

def relativedelta_proxy(*args, **kwargs):
    """Callable proxy around :class:`dateutil.relativedelta.relativedelta`.

    dateutil.relativedelta is an old-style class and cannot be directly
    instantiated within a jinja2 expression, so a lambda "proxy"
    is needed, apparently.
    """
    return relativedelta.relativedelta(*args, **kwargs)
|
||||
|
||||
# Globals exposed to template rendering environments: a conservative
# whitelist of builtins plus url/date helpers.
template_env_globals = {
    'str': str,
    'quote': urls.url_quote,
    'urlencode': urls.url_encode,
    'datetime': safe_eval.datetime,  # pre-wrapped datetime module
    'len': len,
    'abs': abs,
    'min': min,
    'max': max,
    'sum': sum,
    'filter': filter,
    'reduce': functools.reduce,
    'map': map,
    'relativedelta': relativedelta.relativedelta,
    'round': round,
    'hasattr': hasattr,
}
|
||||
|
||||
def parse_inline_template(text):
    """Split ``text`` into ``(literal, expression)`` pairs.

    Every "{{ expr }}" placeholder yields a pair made of the literal text
    preceding it and the placeholder's expression.  A trailing literal
    (text after the last placeholder), if any, is returned with an empty
    expression.
    """
    groups = []
    last_end = 0
    for match in INLINE_TEMPLATE_REGEX.finditer(text):
        groups.append((text[last_end:match.start()], match.group(1)))
        last_end = match.end()

    # string past last regex match
    tail = text[last_end:]
    if tail:
        groups.append((tail, ''))

    return groups
|
||||
|
||||
def convert_inline_template_to_qweb(template):
    """Translate an inline "{{ expr }}" template into QWeb markup.

    Expressions become ``<t t-out="..."/>`` nodes; literal chunks are
    carried over as-is (escaping is handled by ``Markup.format``).
    Falsy ``template`` values are treated as the empty template.
    """
    instructions = parse_inline_template(template or '')
    chunks = []
    for literal, expression in instructions:
        if expression:
            chunks.append(Markup('{}<t t-out="{}"/>').format(literal, expression))
        else:
            chunks.append(literal)
    return Markup('').join(chunks)
|
||||
|
||||
def render_inline_template(template_instructions, variables):
    """Evaluate parsed inline-template instructions against ``variables``.

    :param template_instructions: output of :func:`parse_inline_template`
    :param dict variables: evaluation context for the expressions
    :return str: the rendered text; falsy expression results render as ''
    """
    parts = []
    for literal, expression in template_instructions:
        parts.append(literal)

        if expression:
            value = safe_eval.safe_eval(expression, variables)
            if value:
                parts.append(str(value))

    return ''.join(parts)
|
||||
489
odoo-bringout-oca-ocb-base/odoo/tools/safe_eval.py
Normal file
489
odoo-bringout-oca-ocb-base/odoo/tools/safe_eval.py
Normal file
|
|
@ -0,0 +1,489 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
"""
|
||||
safe_eval module - methods intended to provide more restricted alternatives to
|
||||
evaluate simple and/or untrusted code.
|
||||
|
||||
Methods in this module are typically used as alternatives to eval() to parse
|
||||
OpenERP domain strings, conditions and expressions, mostly based on locals
|
||||
condition/math builtins.
|
||||
"""
|
||||
|
||||
# Module partially ripped from/inspired by several different sources:
|
||||
# - http://code.activestate.com/recipes/286134/
|
||||
# - safe_eval in lp:~xrg/openobject-server/optimize-5.0
|
||||
# - safe_eval in tryton http://hg.tryton.org/hgwebdir.cgi/trytond/rev/bbb5f73319ad
|
||||
import dis
|
||||
import functools
|
||||
import logging
|
||||
import sys
|
||||
import types
|
||||
from opcode import HAVE_ARGUMENT, opmap, opname
|
||||
from types import CodeType
|
||||
|
||||
import werkzeug
|
||||
from psycopg2 import OperationalError
|
||||
|
||||
from .misc import ustr
|
||||
|
||||
import odoo
|
||||
|
||||
unsafe_eval = eval
|
||||
|
||||
__all__ = ['test_expr', 'safe_eval', 'const_eval']
|
||||
|
||||
# The time module is usually already provided in the safe_eval environment
# but some code, e.g. datetime.datetime.now() (Windows/Python 2.5.2, bug
# lp:703841), does import time.
_ALLOWED_MODULES = ['_strptime', 'math', 'time']


# Mock __import__ function, as called by cpython's import emulator
# `PyImport_Import` inside timemodule.c, _datetimemodule.c and others.
# It does not actually import anything: its expected side-effect is only to
# make the module available in `sys.modules`, so it merely checks that the
# module was imported beforehand (the _ALLOWED_MODULES are imported below).
def _import(name, globals=None, locals=None, fromlist=None, level=-1):
    if name in sys.modules:
        return
    raise ImportError(f'module {name} should be imported before calling safe_eval()')


# pre-import the whitelisted modules so the mock __import__ finds them
for module in _ALLOWED_MODULES:
    __import__(module)
|
||||
|
||||
|
||||
# Attribute names that would let sandboxed code reach frames, code objects
# or other interpreter internals; rejected by assert_no_dunder_name().
_UNSAFE_ATTRIBUTES = [
    # Frames
    'f_builtins', 'f_code', 'f_globals', 'f_locals',
    # Python 2 functions
    'func_code', 'func_globals',
    # Code object
    'co_code', '_co_code_adaptive',
    # Method resolution order,
    'mro',
    # Tracebacks
    'tb_frame',
    # Generators
    'gi_code', 'gi_frame', 'gi_yieldfrom',
    # Coroutines
    'cr_await', 'cr_code', 'cr_frame',
    # Coroutine generators
    'ag_await', 'ag_code', 'ag_frame',
]
|
||||
|
||||
|
||||
def to_opcodes(opnames, _opmap=opmap):
    """Yield the numeric opcode for each name of ``opnames`` known to the
    running interpreter; unknown names are silently skipped so the opcode
    sets below stay valid across Python versions."""
    for opcode_name in opnames:
        code = _opmap.get(opcode_name)
        if code is not None:
            yield code


# opcodes which absolutely positively must not be usable in safe_eval,
# explicitly subtracted from all sets of valid opcodes just in case
_BLACKLIST = set(to_opcodes([
    # can't provide access to accessing arbitrary modules
    'IMPORT_STAR', 'IMPORT_NAME', 'IMPORT_FROM',
    # could allow replacing or updating core attributes on models & al, setitem
    # can be used to set field values
    'STORE_ATTR', 'DELETE_ATTR',
    # no reason to allow this
    'STORE_GLOBAL', 'DELETE_GLOBAL',
]))

# opcodes necessary to build literal values
_CONST_OPCODES = set(to_opcodes([
    # stack manipulations
    'POP_TOP', 'ROT_TWO', 'ROT_THREE', 'ROT_FOUR', 'DUP_TOP', 'DUP_TOP_TWO',
    'LOAD_CONST',
    'RETURN_VALUE',  # return the result of the literal/expr evaluation
    # literal collections
    'BUILD_LIST', 'BUILD_MAP', 'BUILD_TUPLE', 'BUILD_SET',
    # 3.6: literal map with constant keys https://bugs.python.org/issue27140
    'BUILD_CONST_KEY_MAP',
    'LIST_EXTEND', 'SET_UPDATE',
    # 3.11 replace DUP_TOP, DUP_TOP_TWO, ROT_TWO, ROT_THREE, ROT_FOUR
    'COPY', 'SWAP',
    # Added in 3.11 https://docs.python.org/3/whatsnew/3.11.html#new-opcodes
    'RESUME',
    # 3.12 https://docs.python.org/3/whatsnew/3.12.html#cpython-bytecode-changes
    'RETURN_CONST',
    # 3.13
    'TO_BOOL',
])) - _BLACKLIST
|
||||
|
||||
# operations which are both binary and inplace, same order as in doc'
_operations = [
    'POWER', 'MULTIPLY',  # 'MATRIX_MULTIPLY', # matrix operator (3.5+)
    'FLOOR_DIVIDE', 'TRUE_DIVIDE', 'MODULO', 'ADD',
    'SUBTRACT', 'LSHIFT', 'RSHIFT', 'AND', 'XOR', 'OR',
]
# operations on literal values
_EXPR_OPCODES = _CONST_OPCODES.union(to_opcodes([
    'UNARY_POSITIVE', 'UNARY_NEGATIVE', 'UNARY_NOT', 'UNARY_INVERT',
    *('BINARY_' + op for op in _operations), 'BINARY_SUBSCR',
    *('INPLACE_' + op for op in _operations),
    'BUILD_SLICE',
    # comprehensions
    'LIST_APPEND', 'MAP_ADD', 'SET_ADD',
    'COMPARE_OP',
    # specialised comparisons
    'IS_OP', 'CONTAINS_OP',
    'DICT_MERGE', 'DICT_UPDATE',
    # Basically used in any "generator literal"
    'GEN_START',  # added in 3.10 but already removed from 3.11.
    # Added in 3.11, replacing all BINARY_* and INPLACE_*
    'BINARY_OP',
    'BINARY_SLICE',
])) - _BLACKLIST
|
||||
|
||||
# full opcode whitelist for safe_eval(): expressions plus control flow,
# function calls, name/local handling and exception machinery
_SAFE_OPCODES = _EXPR_OPCODES.union(to_opcodes([
    'POP_BLOCK', 'POP_EXCEPT',

    # note: removed in 3.8
    'SETUP_LOOP', 'SETUP_EXCEPT', 'BREAK_LOOP', 'CONTINUE_LOOP',

    'EXTENDED_ARG',  # P3.6 for long jump offsets.
    'MAKE_FUNCTION', 'CALL_FUNCTION', 'CALL_FUNCTION_KW', 'CALL_FUNCTION_EX',
    # Added in P3.7 https://bugs.python.org/issue26110
    'CALL_METHOD', 'LOAD_METHOD',

    'GET_ITER', 'FOR_ITER', 'YIELD_VALUE',
    'JUMP_FORWARD', 'JUMP_ABSOLUTE', 'JUMP_BACKWARD',
    'JUMP_IF_FALSE_OR_POP', 'JUMP_IF_TRUE_OR_POP', 'POP_JUMP_IF_FALSE', 'POP_JUMP_IF_TRUE',
    'SETUP_FINALLY', 'END_FINALLY',
    # Added in 3.8 https://bugs.python.org/issue17611
    'BEGIN_FINALLY', 'CALL_FINALLY', 'POP_FINALLY',

    'RAISE_VARARGS', 'LOAD_NAME', 'STORE_NAME', 'DELETE_NAME', 'LOAD_ATTR',
    'LOAD_FAST', 'STORE_FAST', 'DELETE_FAST', 'UNPACK_SEQUENCE',
    'STORE_SUBSCR',
    'LOAD_GLOBAL',

    'RERAISE', 'JUMP_IF_NOT_EXC_MATCH',

    # Following opcodes were Added in 3.11
    # replacement of opcodes CALL_FUNCTION, CALL_FUNCTION_KW, CALL_METHOD
    'PUSH_NULL', 'PRECALL', 'CALL', 'KW_NAMES',
    # replacement of POP_JUMP_IF_TRUE and POP_JUMP_IF_FALSE
    'POP_JUMP_FORWARD_IF_FALSE', 'POP_JUMP_FORWARD_IF_TRUE',
    'POP_JUMP_BACKWARD_IF_FALSE', 'POP_JUMP_BACKWARD_IF_TRUE',
    # special case of the previous for IS NONE / IS NOT NONE
    'POP_JUMP_FORWARD_IF_NONE', 'POP_JUMP_BACKWARD_IF_NONE',
    'POP_JUMP_FORWARD_IF_NOT_NONE', 'POP_JUMP_BACKWARD_IF_NOT_NONE',
    # replacement of JUMP_IF_NOT_EXC_MATCH
    'CHECK_EXC_MATCH',
    # new opcodes
    'RETURN_GENERATOR',
    'PUSH_EXC_INFO',
    'NOP',
    'FORMAT_VALUE', 'BUILD_STRING',
    # 3.12 https://docs.python.org/3/whatsnew/3.12.html#cpython-bytecode-changes
    'END_FOR',
    'LOAD_FAST_AND_CLEAR', 'LOAD_FAST_CHECK',
    'POP_JUMP_IF_NOT_NONE', 'POP_JUMP_IF_NONE',
    'CALL_INTRINSIC_1',
    'STORE_SLICE',
    # 3.13
    'CALL_KW', 'LOAD_FAST_LOAD_FAST',
    'STORE_FAST_STORE_FAST', 'STORE_FAST_LOAD_FAST',
    'CONVERT_VALUE', 'FORMAT_SIMPLE', 'FORMAT_WITH_SPEC',
    'SET_FUNCTION_ATTRIBUTE',
])) - _BLACKLIST


_logger = logging.getLogger(__name__)
|
||||
|
||||
def assert_no_dunder_name(code_obj, expr):
    """ assert_no_dunder_name(code_obj, expr) -> None

    Asserts that the code object does not refer to any "dunder name"
    (__$name__), so that safe_eval prevents access to any internal-ish Python
    attribute or method (both are loaded via LOAD_ATTR which uses a name, not a
    const or a var).

    Checks that no such name exists in the provided code object (co_names).

    :param code_obj: code object to name-validate
    :type code_obj: CodeType
    :param str expr: expression corresponding to the code object, for debugging
        purposes
    :raises NameError: in case a forbidden name (containing two underscores)
        is found in ``code_obj``

    .. note:: actually forbids every name containing 2 underscores
    """
    for name in code_obj.co_names:
        # also reject explicitly unsafe attribute names (frames, code
        # objects, tracebacks, ...) even without a double underscore
        if "__" in name or name in _UNSAFE_ATTRIBUTES:
            raise NameError('Access to forbidden name %r (%r)' % (name, expr))
|
||||
|
||||
def assert_valid_codeobj(allowed_codes, code_obj, expr):
    """ Asserts that the provided code object validates against the bytecode
    and name constraints.

    Recursively validates the code objects stored in its co_consts in case
    lambdas are being created/used (lambdas generate their own separated code
    objects and don't live in the root one)

    :param allowed_codes: list of permissible bytecode instructions
    :type allowed_codes: set(int)
    :param code_obj: code object to name-validate
    :type code_obj: CodeType
    :param str expr: expression corresponding to the code object, for debugging
        purposes
    :raises ValueError: in case of forbidden bytecode in ``code_obj``
    :raises NameError: in case a forbidden name (containing two underscores)
        is found in ``code_obj``
    """
    assert_no_dunder_name(code_obj, expr)

    # set operations are almost twice as fast as a manual iteration + condition
    # when loading /web according to line_profiler
    code_codes = {i.opcode for i in dis.get_instructions(code_obj)}
    if not allowed_codes >= code_codes:
        raise ValueError("forbidden opcode(s) in %r: %s" % (expr, ', '.join(opname[x] for x in (code_codes - allowed_codes))))

    # recurse into nested code objects (lambdas, comprehensions, ...)
    for const in code_obj.co_consts:
        if isinstance(const, CodeType):
            assert_valid_codeobj(allowed_codes, const, 'lambda')
|
||||
|
||||
def test_expr(expr, allowed_codes, mode="eval", filename=None):
    """test_expr(expression, allowed_codes[, mode[, filename]]) -> code_object

    Test that the expression contains only the allowed opcodes.
    If the expression is valid and contains only allowed codes,
    return the compiled code object.
    Otherwise raise a ValueError, a Syntax Error or TypeError accordingly.

    :param filename: optional pseudo-filename for the compiled expression,
                     displayed for example in traceback frames
    :type filename: string
    """
    try:
        if mode == 'eval':
            # eval() does not like leading/trailing whitespace
            expr = expr.strip()
        code_obj = compile(expr, filename or "", mode)
    except (SyntaxError, TypeError, ValueError):
        # compilation errors are propagated as-is
        raise
    except Exception as e:
        # anything else (e.g. non-string input) is normalized to ValueError
        raise ValueError('"%s" while compiling\n%r' % (ustr(e), expr))
    assert_valid_codeobj(allowed_codes, code_obj, expr)
    return code_obj
|
||||
|
||||
|
||||
def const_eval(expr):
    """const_eval(expression) -> value

    Safe Python constant evaluation

    Evaluates a string that contains an expression describing
    a Python constant. Strings that are not valid Python expressions
    or that contain other code besides the constant raise ValueError.

    >>> const_eval("10")
    10
    >>> const_eval("[1,2, (3,4), {'foo':'bar'}]")
    [1, 2, (3, 4), {'foo': 'bar'}]
    >>> const_eval("1+2")
    Traceback (most recent call last):
    ...
    ValueError: opcode BINARY_ADD not allowed
    """
    # only literal-building opcodes pass validation, so evaluating the
    # checked code object with the real eval() is safe
    c = test_expr(expr, _CONST_OPCODES)
    return unsafe_eval(c)
|
||||
|
||||
def expr_eval(expr):
    """expr_eval(expression) -> value

    Restricted Python expression evaluation

    Evaluates a string that contains an expression that only
    uses Python constants. This can be used to e.g. evaluate
    a numerical expression from an untrusted source.

    >>> expr_eval("1+2")
    3
    >>> expr_eval("[1,2]*2")
    [1, 2, 1, 2]
    >>> expr_eval("__import__('sys').modules")
    Traceback (most recent call last):
    ...
    ValueError: opcode LOAD_NAME not allowed
    """
    # like const_eval() but additionally allows arithmetic/comparison opcodes
    c = test_expr(expr, _EXPR_OPCODES)
    return unsafe_eval(c)
|
||||
|
||||
# Builtins exposed to sandboxed code; note that 'unicode' and 'xrange' are
# kept as Python 2 compatibility aliases of str/range, and __import__ is
# replaced by the restrictive _import mock above.
_BUILTINS = {
    '__import__': _import,
    'True': True,
    'False': False,
    'None': None,
    'bytes': bytes,
    'str': str,
    'unicode': str,
    'bool': bool,
    'int': int,
    'float': float,
    'enumerate': enumerate,
    'dict': dict,
    'list': list,
    'tuple': tuple,
    'map': map,
    'abs': abs,
    'min': min,
    'max': max,
    'sum': sum,
    'reduce': functools.reduce,
    'filter': filter,
    'sorted': sorted,
    'round': round,
    'len': len,
    'repr': repr,
    'set': set,
    'all': all,
    'any': any,
    'ord': ord,
    'chr': chr,
    'divmod': divmod,
    'isinstance': isinstance,
    'range': range,
    'xrange': range,
    'zip': zip,
    'Exception': Exception,
}
|
||||
def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=False, locals_builtins=False, filename=None):
    """safe_eval(expression[, globals[, locals[, mode[, nocopy]]]]) -> result

    System-restricted Python expression evaluation

    Evaluates a string that contains an expression that mostly
    uses Python constants, arithmetic expressions and the
    objects directly provided in context.

    This can be used to e.g. evaluate
    an OpenERP domain expression from an untrusted source.

    :param filename: optional pseudo-filename for the compiled expression,
                     displayed for example in traceback frames
    :type filename: string
    :throws TypeError: If the expression provided is a code object
    :throws SyntaxError: If the expression provided is not valid Python
    :throws NameError: If the expression provided accesses forbidden names
    :throws ValueError: If the expression provided uses forbidden bytecode
    """
    if type(expr) is CodeType:
        raise TypeError("safe_eval does not allow direct evaluation of code objects.")

    # prevent altering the globals/locals from within the sandbox
    # by taking a copy.
    if not nocopy:
        # isinstance() does not work below, we want *exactly* the dict class
        if (globals_dict is not None and type(globals_dict) is not dict) \
                or (locals_dict is not None and type(locals_dict) is not dict):
            _logger.warning(
                "Looks like you are trying to pass a dynamic environment, "
                "you should probably pass nocopy=True to safe_eval().")
        if globals_dict is not None:
            globals_dict = dict(globals_dict)
        if locals_dict is not None:
            locals_dict = dict(locals_dict)

    # refuse raw module objects in the evaluation context
    check_values(globals_dict)
    check_values(locals_dict)

    if globals_dict is None:
        globals_dict = {}

    # expose only the whitelisted builtins to the sandboxed code
    globals_dict['__builtins__'] = dict(_BUILTINS)
    if locals_builtins:
        if locals_dict is None:
            locals_dict = {}
        locals_dict.update(_BUILTINS)
    c = test_expr(expr, _SAFE_OPCODES, mode=mode, filename=filename)
    try:
        return unsafe_eval(c, globals_dict, locals_dict)
    except odoo.exceptions.UserError:
        raise
    except odoo.exceptions.RedirectWarning:
        raise
    except werkzeug.exceptions.HTTPException:
        raise
    except OperationalError:
        # Do not hide PostgreSQL low-level exceptions, to let the auto-replay
        # of serialized transactions work its magic
        raise
    except ZeroDivisionError:
        raise
    except Exception as e:
        # wrap all other errors with the offending expression for context
        raise ValueError('%s: "%s" while evaluating\n%r' % (ustr(type(e)), ustr(e), expr))
|
||||
def test_python_expr(expr, mode="eval"):
    """Validate ``expr`` against the sandbox restrictions.

    :return: a human-readable error message string when compilation or
        validation fails, ``False`` when the expression is acceptable.
    """
    try:
        test_expr(expr, _SAFE_OPCODES, mode=mode)
    except (SyntaxError, TypeError, ValueError) as err:
        # SyntaxError-style errors carry (filename, lineno, offset, text)
        # as their second argument; use it for a precise message
        if len(err.args) >= 2 and len(err.args[1]) >= 4:
            message = err.args[0]
            _filename, lineno, _offset, error_line = err.args[1][:4]
            return "%s : %s at line %d\n%s" % (type(err).__name__, message, lineno, error_line)
        return ustr(err)
    return False
|
||||
|
||||
|
||||
def check_values(d):
    """Refuse evaluation contexts that smuggle in raw module objects.

    :param d: evaluation context (a mapping), or any falsy value
    :return: ``d`` unchanged
    :raises TypeError: if any value of ``d`` is a module
    """
    if d:
        for value in d.values():
            if isinstance(value, types.ModuleType):
                raise TypeError(f"""Module {value} can not be used in evaluation contexts

Prefer providing only the items necessary for your intended use.

If a "module" is necessary for backwards compatibility, use
`odoo.tools.safe_eval.wrap_module` to generate a wrapper recursively
whitelisting allowed attributes.

Pre-wrapped modules are provided as attributes of `odoo.tools.safe_eval`.
""")
    return d
|
||||
|
||||
class wrap_module:
    """Read-only façade exposing a whitelisted subset of a module's attributes."""

    def __init__(self, module, attributes):
        """Helper for wrapping a package/module to expose selected attributes

        :param module: the actual package/module to wrap, as returned by ``import <module>``
        :param iterable attributes: attributes to expose / whitelist. If a dict,
                                    the keys are the attributes and the values
                                    are used as an ``attributes`` in case the
                                    corresponding item is a submodule
        """
        # builtin modules don't have a __file__ at all
        source_file = getattr(module, '__file__', '(built-in)')
        self._repr = f"<wrapped {module.__name__!r} ({source_file})>"
        for name in attributes:
            value = getattr(module, name)
            if isinstance(value, types.ModuleType):
                # recurse: sub-modules get wrapped with their own whitelist
                value = wrap_module(value, attributes[name])
            setattr(self, name, value)

    def __repr__(self):
        return self._repr
|
||||
|
||||
# dateutil submodules are lazy so need to import them for them to "exist"
import dateutil
mods = ['parser', 'relativedelta', 'rrule', 'tz']
for mod in mods:
    __import__('dateutil.%s' % mod)
# pre-wrapped modules, safe to hand to evaluation contexts
datetime = wrap_module(__import__('datetime'), ['date', 'datetime', 'time', 'timedelta', 'timezone', 'tzinfo', 'MAXYEAR', 'MINYEAR'])
dateutil = wrap_module(dateutil, {
    "tz": ["UTC", "tzutc"],
    "parser": ["isoparse", "parse"],
    "relativedelta": ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"],
    "rrule": ["rrule", "rruleset", "rrulestr", "YEARLY", "MONTHLY", "WEEKLY", "DAILY", "HOURLY", "MINUTELY", "SECONDLY", "MO", "TU", "WE", "TH", "FR", "SA", "SU"],
})
json = wrap_module(__import__('json'), ['loads', 'dumps'])
time = wrap_module(__import__('time'), ['time', 'strptime', 'strftime', 'sleep'])
pytz = wrap_module(__import__('pytz'), [
    'utc', 'UTC', 'timezone',
])
# route sandboxed dateutil.tz.gettz through pytz's timezone database
dateutil.tz.gettz = pytz.timezone
|
||||
179
odoo-bringout-oca-ocb-base/odoo/tools/sourcemap_generator.py
Normal file
179
odoo-bringout-oca-ocb-base/odoo/tools/sourcemap_generator.py
Normal file
|
|
@ -0,0 +1,179 @@
|
|||
from functools import lru_cache
|
||||
import json
|
||||
|
||||
|
||||
class SourceMapGenerator:
    """
    The SourceMapGenerator creates the sourcemap maps the asset bundle to the js/css files.

    What is a sourcemap ? (https://developer.mozilla.org/en-US/docs/Tools/Debugger/How_to/Use_a_source_map)
    In brief: a source map is what makes possible to debug your processed/compiled/minified code as if you were
    debugging the original, non-altered source code. It is a file that provides a mapping original <=> processed for
    the browser to read.

    This implementation of the SourceMapGenerator is a translation and adaptation of this implementation
    in js https://github.com/mozilla/source-map. For performance purposes, we have removed all unnecessary
    functions/steps for our use case. This simpler version does a line by line mapping, with the ability to
    add offsets at the start and end of a file. (when we have to add comments on top a transpiled file by example).
    """
    def __init__(self, source_root=None):
        self._file = None                 # optional "file" entry of the final map
        self._source_root = source_root   # optional "sourceRoot" entry of the final map
        self._sources = {}                # source name -> index in the "sources" array
        self._mappings = []               # raw line mappings, encoded by _serialize_mappings()
        self._sources_contents = {}       # source name -> full source text
        self._version = 3                 # source map specification version
        self._cache = {}                  # (source delta, line delta) -> encoded VLQ segment

    def _serialize_mappings(self):
        """
        A source map mapping is encoded with the base 64 VLQ format.
        This function encodes the readable source to the format.

        :return: the encoded "mappings" string of the source map
        """
        previous_generated_line = 1
        previous_original_line = 0
        previous_source = 0
        # the generated column is always 0 in this line-by-line mapping,
        # so its encoding is a constant
        encoded_column = base64vlq_encode(0)
        result = ""
        for mapping in self._mappings:
            if mapping["generatedLine"] != previous_generated_line:
                # one ";" per generated line between two mapped lines
                while mapping["generatedLine"] > previous_generated_line:
                    result += ";"
                    previous_generated_line += 1

            if mapping["source"] is not None:
                sourceIdx = self._sources[mapping["source"]]
                # the spec delta-encodes each field relative to the previous segment
                source = sourceIdx - previous_source
                previous_source = sourceIdx

                # lines are stored 0-based in SourceMap spec version 3
                line = mapping["originalLine"] - 1 - previous_original_line
                previous_original_line = mapping["originalLine"] - 1

                # segments repeat heavily (mostly delta 0/1), hence the cache
                if (source, line) not in self._cache:
                    self._cache[(source, line)] = "".join([
                        encoded_column,
                        base64vlq_encode(source),
                        base64vlq_encode(line),
                        encoded_column,
                    ])

                result += self._cache[source, line]
        return result

    def to_json(self):
        """
        Generates the sourcemap structure.
        It is the main function that assembles all the pieces.

        :return dict: the source map as a dict, ready to be JSON-serialized
            (serialization to bytes happens in :meth:`get_content`)
        """
        mapping = {
            "version": self._version,
            "sources": list(self._sources.keys()),
            "mappings": self._serialize_mappings(),
            "sourcesContent": [self._sources_contents[source] for source in self._sources]
        }
        if self._file:
            mapping["file"] = self._file

        if self._source_root:
            mapping["sourceRoot"] = self._source_root

        return mapping

    def get_content(self):
        """Generates the content of the sourcemap.

        :return: the content of the sourcemap as bytes (UTF-8), prefixed with
            the ")]}'" anti-XSSI marker.
        """
        # Store with XSSI-prevention prefix
        return b")]}'\n" + json.dumps(self.to_json()).encode('utf8')

    def add_source(self, source_name, source_content, last_index, start_offset=0):
        """Adds a new source file in the sourcemap. All the lines of the source file will be mapped line by line
        to the generated file from the (last_index + start_offset). All lines between
        last_index and (last_index + start_offset) will
        be mapped to line 1 of the source file.

        Example:
            ls 1 = Line 1 from new source file
            lg 1 = Line 1 from generated file
            ls 1 <=> lg 1 Line 1 from new source file is map to Line 1 from generated file
            nb_ls = number of lines in the new source file

            Step 1:
                ls 1 <=> lg last_index + 1

            Step 2:
                ls 1 <=> lg last_index + start_offset + 1
                ls 2 <=> lg last_index + start_offset + 2
                ...
                ls nb_ls <=> lg last_index + start_offset + nb_ls

        :param source_name: name of the source to add
        :param source_content: content of the source to add
        :param last_index: Line where we start to map the new source
        :param start_offset: Number of lines to pass in the generated file before starting mapping line by line
        """
        source_line_count = len(source_content.split("\n"))

        # register the source and remember its index in the "sources" array
        self._sources.setdefault(source_name, len(self._sources))

        self._sources_contents[source_name] = source_content
        if start_offset > 0:
            # adds a mapping between the first line of the source
            # and the first line of the corresponding code in the generated file.
            self._mappings.append({
                "generatedLine": last_index + 1,
                "originalLine": 1,
                "source": source_name,
            })
        for i in range(1, source_line_count + 1):
            self._mappings.append({
                "generatedLine": last_index + i + start_offset,
                "originalLine": i,
                "source": source_name,
            })
|
||||
|
||||
|
||||
B64CHARS = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# 5 payload bits per Base64 VLQ digit; the 6th (high) bit is the continuation flag.
SHIFTSIZE, FLAG, MASK = 5, 1 << 5, (1 << 5) - 1


@lru_cache(maxsize=64)
def base64vlq_encode(*values):
    """Encode integers as a Base64 VLQ sequence, as used in source maps.

    Each value becomes one or more Base64 characters carrying 5 bits of
    payload each, little-endian, with the 6th bit flagging continuation.
    The first digit of a value reserves its lowest payload bit for the sign,
    leaving 4 bits of magnitude (see
    https://gist.github.com/mjpieters/86b0d152bb51d5f5979346d11005588b).

    :param values: integers to encode (source maps use 1, 4 or 5 of them)
    :return str: the Base64 VLQ encoding of all values, concatenated
    """
    encoded = bytearray()
    for value in values:
        # fold the sign into the lowest bit of the first quantity
        quantity = (abs(value) << 1) | int(value < 0)
        while True:
            digit, quantity = quantity & MASK, quantity >> SHIFTSIZE
            if quantity:
                digit |= FLAG  # more digits follow for this value
            encoded.append(B64CHARS[digit])
            if not quantity:
                break
    return encoded.decode()
|
||||
214
odoo-bringout-oca-ocb-base/odoo/tools/speedscope.py
Normal file
214
odoo-bringout-oca-ocb-base/odoo/tools/speedscope.py
Normal file
|
|
@ -0,0 +1,214 @@
|
|||
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import reprlib

# Shared Repr instance used to truncate long strings (e.g. SQL queries)
# when building frame labels.
shortener = reprlib.Repr()
shortener.maxstring = 150  # truncate repr() output beyond 150 characters
shorten = shortener.repr   # shorten(s) -> possibly-truncated repr string
|
||||
|
||||
|
||||
class Speedscope:
    """Assemble profiling entries into the Speedscope file format.

    :meth:`add` registers raw profile entry lists under a key,
    :meth:`add_output` (or :meth:`add_default`) turns them into "evented"
    speedscope profiles, and :meth:`make` returns the final structure
    following https://www.speedscope.app/file-format-schema.json.

    NOTE(review): entries are expected to be dicts with at least 'start' and
    'stack' (list of frame tuples), and optionally 'time', 'query',
    'full_query' and 'exec_context' -- confirm against the profiler that
    produces them.
    """
    def __init__(self, name='Speedscope', init_stack_trace=None):
        # common stack prefix shared by all profiles; wrapped around each
        # output when add_output(complete=True)
        self.init_stack_trace = init_stack_trace or []
        self.init_stack_trace_level = len(self.init_stack_trace)
        self.caller_frame = None
        self.convert_stack(self.init_stack_trace)

        # last frame of the initial stack: used as the caller of each
        # entry's first frame when converting entry stacks in add()
        self.init_caller_frame = None
        if self.init_stack_trace:
            self.init_caller_frame = self.init_stack_trace[-1]
        self.profiles_raw = {}     # key -> raw entry list, as given to add()
        self.name = name
        self.frames_indexes = {}   # frame tuple -> index in the shared frame table
        self.frame_count = 0       # next frame index to hand out
        self.profiles = []         # assembled speedscope profiles

    def add(self, key, profile):
        """Register raw entry list ``profile`` under ``key``.

        Entry stacks are converted in place; entries carrying a 'query' get a
        synthetic leaf frame labelled with a shortened version of the query.
        """
        for entry in profile:
            self.caller_frame = self.init_caller_frame
            self.convert_stack(entry['stack'] or [])
            if 'query' in entry:
                query = entry['query']
                full_query = entry['full_query']
                # NOTE(review): assumes entry['stack'] is a list when a
                # query is present (a None stack would fail on append) --
                # confirm upstream guarantees.
                entry['stack'].append((f'sql({shorten(query)})', full_query, None))
        self.profiles_raw[key] = profile

    def convert_stack(self, stack):
        """Rewrite raw frames in place as display tuples.

        Each frame becomes (method, caller description, caller line number),
        i.e. it is annotated with the position of its *caller* (the previous
        frame), when that caller is a raw 4-tuple.
        """
        for index, frame in enumerate(stack):
            method = frame[2]
            line = ''
            number = ''
            if self.caller_frame and len(self.caller_frame) == 4:
                line = f"called at {self.caller_frame[0]} ({self.caller_frame[3].strip()})"
                number = self.caller_frame[1]
            stack[index] = (method, line, number,)
            self.caller_frame = frame

    def add_output(self, names, complete=True, display_name=None, use_context=True, **params):
        """Build one evented speedscope profile from the raw profiles ``names``.

        :param names: iterable of keys previously registered with add()
        :param complete: wrap the events with open/close events for the
            initial stack trace
        :param display_name: profile label (defaults to the joined names)
        :param use_context: interleave execution-context frames into stacks
        :param params: forwarded to process() (continuous, hide_gaps, ...)
        :return: self, for chaining
        """
        entries = []
        display_name = display_name or ','.join(names)
        for name in names:
            entries += self.profiles_raw[name]
        # merge the selected profiles chronologically
        entries.sort(key=lambda e: e['start'])
        result = self.process(entries, use_context=use_context, **params)
        if not result:
            return self
        start = result[0]['at']
        end = result[-1]['at']

        if complete:
            # surround all events with open/close events for the initial
            # stack trace, so every profile shares the same visual root
            start_stack = []
            end_stack = []
            init_stack_trace_ids = self.stack_to_ids(self.init_stack_trace, use_context and entries[0].get('exec_context'))
            for frame_id in init_stack_trace_ids:
                start_stack.append({
                    "type": "O",
                    "frame": frame_id,
                    "at": start
                })
            for frame_id in reversed(init_stack_trace_ids):
                end_stack.append({
                    "type": "C",
                    "frame": frame_id,
                    "at": end
                })
            result = start_stack + result + end_stack

        self.profiles.append({
            "name": display_name,
            "type": "evented",
            "unit": "seconds",
            "startValue": 0,
            "endValue": end - start,
            "events": result
        })
        return self

    def add_default(self):
        """Add a default set of outputs for the registered raw profiles."""
        if len(self.profiles_raw) > 1:
            # iterating the dict passes its keys as the profile names
            self.add_output(self.profiles_raw, display_name='Combined')
            self.add_output(self.profiles_raw, display_name='Combined no context', use_context=False)
        for key, profile in self.profiles_raw.items():
            sql = profile and profile[0].get('query')
            if sql:
                # sql profiles get gap-less and density variants instead
                self.add_output([key], hide_gaps=True, display_name=f'{key} (no gap)')
                self.add_output([key], continuous=False, complete=False, display_name=f'{key} (density)')

            else:
                self.add_output([key], display_name=key)
        return self

    def make(self):
        """Return the complete speedscope file structure as a dict."""
        if not self.profiles:
            self.add_default()
        return {
            "name": self.name,
            "activeProfileIndex": 0,
            "$schema": "https://www.speedscope.app/file-format-schema.json",
            "shared": {
                # frames_indexes preserves insertion order, which matches
                # the indexes handed out by get_frame_id()
                "frames": [{
                    "name": frame[0],
                    "file": frame[1],
                    "line": frame[2]
                } for frame in self.frames_indexes]
            },
            "profiles": self.profiles,
        }

    def get_frame_id(self, frame):
        # Intern the frame tuple: return its stable index in the shared table,
        # creating a new index on first sight.
        if frame not in self.frames_indexes:
            self.frames_indexes[frame] = self.frame_count
            self.frame_count += 1
        return self.frames_indexes[frame]

    def stack_to_ids(self, stack, context, stack_offset=0):
        """
        :param stack: A list of hashable frame
        :param context: an iterable of (level, value) ordered by level
        :param stack_offset: offset level for stack

        Assemble stack and context and return a list of ids representing
        this stack, adding each corresponding context at the corresponding
        level.
        """
        stack_ids = []
        context_iterator = iter(context or ())
        context_level, context_value = next(context_iterator, (None, None))
        # consume iterator until we are over stack_offset
        while context_level is not None and context_level < stack_offset:
            context_level, context_value = next(context_iterator, (None, None))
        for level, frame in enumerate(stack, start=stack_offset + 1):
            # insert a synthetic frame for every context recorded at this level
            while context_level == level:
                context_frame = (", ".join(f"{k}={v}" for k, v in context_value.items()), '', '')
                stack_ids.append(self.get_frame_id(context_frame))
                context_level, context_value = next(context_iterator, (None, None))
            stack_ids.append(self.get_frame_id(frame))
        return stack_ids

    def process(self, entries, continuous=True, hide_gaps=False, use_context=True, constant_time=False):
        """Turn chronologically-sorted entries into speedscope events.

        Emits "O"(pen) / "C"(lose) events per frame. With ``continuous``,
        the common prefix of consecutive stacks stays open instead of being
        closed and reopened.

        :param hide_gaps: make each entry start where the previous one ended
        :param constant_time: use the entry index as timestamp
        """
        # constant_time parameters is mainly usefull to hide temporality when focussing on sql determinism
        entry_end = previous_end = None
        if not entries:
            return []
        events = []
        current_stack_ids = []
        frames_start = entries[0]['start']

        # add last closing entry if missing
        last_entry = entries[-1]
        if last_entry['stack']:
            entries.append({'stack': [], 'start': last_entry['start'] + last_entry.get('time', 0)})

        for index, entry in enumerate(entries):
            if constant_time:
                entry_start = close_time = index
            else:
                previous_end = entry_end

                if hide_gaps and previous_end:
                    entry_start = previous_end
                else:
                    entry_start = entry['start'] - frames_start

                if previous_end and previous_end > entry_start:
                    # skip entry if entry starts after another entry end
                    continue

                if previous_end:
                    close_time = min(entry_start, previous_end)
                else:
                    close_time = entry_start

                entry_time = entry.get('time')
                entry_end = None if entry_time is None else entry_start + entry_time

            entry_stack_ids = self.stack_to_ids(
                entry['stack'] or [],
                use_context and entry.get('exec_context'),
                self.init_stack_trace_level
            )
            level = 0
            if continuous:
                # find the first level where the new stack diverges from the
                # currently-open one; only close/reopen from there on
                level = -1
                for level, at_level in enumerate(zip(current_stack_ids, entry_stack_ids)):
                    current, new = at_level
                    if current != new:
                        break
                else:
                    level += 1

            for frame in reversed(current_stack_ids[level:]):
                events.append({
                    "type": "C",
                    "frame": frame,
                    "at": close_time
                })
            for frame in entry_stack_ids[level:]:
                events.append({
                    "type": "O",
                    "frame": frame,
                    "at": entry_start
                })
            current_stack_ids = entry_stack_ids

        return events
|
||||
433
odoo-bringout-oca-ocb-base/odoo/tools/sql.py
Normal file
433
odoo-bringout-oca-ocb-base/odoo/tools/sql.py
Normal file
|
|
@ -0,0 +1,433 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
# pylint: disable=sql-injection
|
||||
|
||||
import logging
|
||||
import json
|
||||
import re
|
||||
import psycopg2
|
||||
from psycopg2.sql import SQL, Identifier
|
||||
|
||||
import odoo.sql_db
|
||||
from collections import defaultdict
|
||||
from contextlib import closing
|
||||
|
||||
_schema = logging.getLogger('odoo.schema')
|
||||
|
||||
# Map from SQL "ON DELETE" action names to the single-character codes stored
# in pg_constraint.confdeltype.
_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}
|
||||
|
||||
def existing_tables(cr, tablenames):
    """ Return the names of existing tables among ``tablenames``. """
    # regular tables ('r'), views ('v') and materialized views ('m') all count
    cr.execute("""
        SELECT c.relname
          FROM pg_class c
          JOIN pg_namespace n ON (n.oid = c.relnamespace)
         WHERE c.relname IN %s
           AND c.relkind IN ('r', 'v', 'm')
           AND n.nspname = current_schema
    """, [tuple(tablenames)])
    return [name for name, in cr.fetchall()]

def table_exists(cr, tablename):
    """ Return whether the given table exists. """
    return len(existing_tables(cr, {tablename})) == 1
|
||||
|
||||
def table_kind(cr, tablename):
    """ Return the kind of a table: ``'r'`` (regular table), ``'v'`` (view),
    ``'f'`` (foreign table), ``'t'`` (temporary table),
    ``'m'`` (materialized view), or ``None``.
    """
    cr.execute("""
        SELECT c.relkind
          FROM pg_class c
          JOIN pg_namespace n ON (n.oid = c.relnamespace)
         WHERE c.relname = %s
           AND n.nspname = current_schema
    """, (tablename,))
    if not cr.rowcount:
        return None
    return cr.fetchone()[0]
|
||||
|
||||
# prescribed column order by type: columns aligned on 4 bytes, columns aligned
# on 1 byte, columns aligned on 8 bytes(values have been chosen to minimize
# padding in rows; unknown column types are put last)
# NOTE(review): keys look like postgres udt names (cf. table_columns'
# udt_name) -- confirm before extending.
SQL_ORDER_BY_TYPE = defaultdict(lambda: 16, {
    'int4': 1,       # 4 bytes aligned on 4 bytes
    'varchar': 2,    # variable aligned on 4 bytes
    'date': 3,       # 4 bytes aligned on 4 bytes
    'jsonb': 4,      # jsonb
    'text': 5,       # variable aligned on 4 bytes
    'numeric': 6,    # variable aligned on 4 bytes
    'bool': 7,       # 1 byte aligned on 1 byte
    'timestamp': 8,  # 8 bytes aligned on 8 bytes
    'float8': 9,     # 8 bytes aligned on 8 bytes
})
|
||||
|
||||
def create_model_table(cr, tablename, comment=None, columns=()):
    """ Create the table for a model.

    :param cr: database cursor
    :param tablename: name of the table to create
    :param comment: optional SQL comment for the table itself
    :param columns: iterable of (name, sql type, comment) triples; a SERIAL
        ``id`` primary key column is always created first
    """
    specs = ['id SERIAL NOT NULL']
    specs.extend('"{}" {}'.format(name, sql_type) for name, sql_type, _comment in columns)
    cr.execute('CREATE TABLE "{}" ({}, PRIMARY KEY(id))'.format(tablename, ", ".join(specs)))

    # attach all table/column comments in a single round-trip
    comment_queries = []
    comment_params = []
    if comment:
        comment_queries.append('COMMENT ON TABLE "{}" IS %s'.format(tablename))
        comment_params.append(comment)
    for name, _sql_type, column_comment in columns:
        comment_queries.append('COMMENT ON COLUMN "{}"."{}" IS %s'.format(tablename, name))
        comment_params.append(column_comment)
    if comment_queries:
        cr.execute("; ".join(comment_queries), comment_params)

    _schema.debug("Table %r: created", tablename)
|
||||
|
||||
def table_columns(cr, tablename):
    """ Return a dict mapping column names to their configuration. The latter is
    a dict with the data from the table ``information_schema.columns``.
    """
    # Do not select the field `character_octet_length` from `information_schema.columns`
    # because specific access right restriction in the context of shared hosting (Heroku, OVH, ...)
    # might prevent a postgres user to read this field.
    cr.execute(
        '''SELECT column_name, udt_name, character_maximum_length, is_nullable
           FROM information_schema.columns WHERE table_name=%s''',
        (tablename,),
    )
    return {row['column_name']: row for row in cr.dictfetchall()}
|
||||
|
||||
def column_exists(cr, tablename, columnname):
    """ Return whether the given column exists (as a truthy row count). """
    cr.execute(
        """ SELECT 1 FROM information_schema.columns
            WHERE table_name=%s AND column_name=%s """,
        (tablename, columnname),
    )
    return cr.rowcount
|
||||
|
||||
def create_column(cr, tablename, columnname, columntype, comment=None):
    """ Create a column with the given type.

    BOOLEAN columns get a ``DEFAULT false`` so existing rows are backfilled
    with a non-NULL value.

    :param cr: database cursor
    :param tablename: table to alter
    :param columnname: name of the new column
    :param columntype: SQL type of the new column
    :param comment: optional SQL comment to attach to the column
    """
    # conditional expression instead of the legacy `and/or` ternary idiom
    coldefault = 'DEFAULT false' if columntype.upper() == 'BOOLEAN' else ''
    cr.execute('ALTER TABLE "{}" ADD COLUMN "{}" {} {}'.format(tablename, columnname, columntype, coldefault))
    if comment:
        cr.execute('COMMENT ON COLUMN "{}"."{}" IS %s'.format(tablename, columnname), (comment,))
    _schema.debug("Table %r: added column %r of type %s", tablename, columnname, columntype)
|
||||
|
||||
def rename_column(cr, tablename, columnname1, columnname2):
    """ Rename column ``columnname1`` of ``tablename`` to ``columnname2``. """
    cr.execute(f'ALTER TABLE "{tablename}" RENAME COLUMN "{columnname1}" TO "{columnname2}"')
    _schema.debug("Table %r: renamed column %r to %r", tablename, columnname1, columnname2)
|
||||
|
||||
def convert_column(cr, tablename, columnname, columntype):
    """ Convert the column to the given type, casting existing values. """
    using = f'"{columnname}"::{columntype}'
    _convert_column(cr, tablename, columnname, columntype, using)

def convert_column_translatable(cr, tablename, columnname, columntype):
    """ Convert the column from/to a 'jsonb' translated field column. """
    # the per-column index cannot survive the type change; drop it first
    drop_index(cr, f"{tablename}_{columnname}_index", tablename)
    if columntype == "jsonb":
        # scalar -> jsonb: wrap the current value as the 'en_US' translation
        using = f"""CASE WHEN "{columnname}" IS NOT NULL THEN jsonb_build_object('en_US', "{columnname}"::varchar) END"""
    else:
        # jsonb -> scalar: keep only the 'en_US' translation
        using = f""""{columnname}"->>'en_US'"""
    _convert_column(cr, tablename, columnname, columntype, using)

def _convert_column(cr, tablename, columnname, columntype, using):
    # Change the column type (dropping any default) with the given USING
    # expression. Tried first inside a savepoint so a failure can be retried.
    query = f'''
        ALTER TABLE "{tablename}"
        ALTER COLUMN "{columnname}" DROP DEFAULT,
        ALTER COLUMN "{columnname}" TYPE {columntype} USING {using}
    '''
    try:
        with cr.savepoint(flush=False):
            cr.execute(query, log_exceptions=False)
    except psycopg2.NotSupportedError:
        # postgres refuses to alter a column referenced by a view: drop the
        # depending views and retry.
        # NOTE(review): presumably the views are recreated later by the ORM --
        # confirm with callers.
        drop_depending_views(cr, tablename, columnname)
        cr.execute(query)
    _schema.debug("Table %r: column %r changed to type %s", tablename, columnname, columntype)
|
||||
|
||||
def drop_depending_views(cr, table, column):
    """drop views depending on a field to allow the ORM to resize it in-place"""
    for view_name, view_kind in get_depending_views(cr, table, column):
        materialized = "MATERIALIZED" if view_kind == "m" else ""
        cr.execute(f"DROP {materialized} VIEW IF EXISTS {view_name} CASCADE")
        _schema.debug("Drop view %r", view_name)

def get_depending_views(cr, table, column):
    """Return (quoted view name, relkind) pairs of the views ('v') and
    materialized views ('m') depending on ``table.column``.
    """
    # http://stackoverflow.com/a/11773226/75349
    cr.execute("""
        SELECT distinct quote_ident(dependee.relname), dependee.relkind
        FROM pg_depend
        JOIN pg_rewrite ON pg_depend.objid = pg_rewrite.oid
        JOIN pg_class as dependee ON pg_rewrite.ev_class = dependee.oid
        JOIN pg_class as dependent ON pg_depend.refobjid = dependent.oid
        JOIN pg_attribute ON pg_depend.refobjid = pg_attribute.attrelid
            AND pg_depend.refobjsubid = pg_attribute.attnum
        WHERE dependent.relname = %s
        AND pg_attribute.attnum > 0
        AND pg_attribute.attname = %s
        AND dependee.relkind in ('v', 'm')
    """, [table, column])
    return cr.fetchall()
|
||||
|
||||
def set_not_null(cr, tablename, columnname):
    """ Add a NOT NULL constraint on the given column.

    The attempt runs inside a savepoint so a failure (e.g. NULLs present in
    the column) does not poison the surrounding transaction.

    :raises Exception: if the constraint cannot be set
    """
    query = 'ALTER TABLE "{}" ALTER COLUMN "{}" SET NOT NULL'.format(tablename, columnname)
    try:
        with cr.savepoint(flush=False):
            cr.execute(query, log_exceptions=False)
            _schema.debug("Table %r: column %r: added constraint NOT NULL", tablename, columnname)
    except Exception as exc:
        # The original passed the %r arguments straight to Exception(), so the
        # message was never interpolated; format it and keep the root cause.
        raise Exception("Table %r: unable to set NOT NULL on column %r" % (tablename, columnname)) from exc
|
||||
|
||||
def drop_not_null(cr, tablename, columnname):
    """ Drop the NOT NULL constraint on the given column. """
    cr.execute(f'ALTER TABLE "{tablename}" ALTER COLUMN "{columnname}" DROP NOT NULL')
    _schema.debug("Table %r: column %r: dropped constraint NOT NULL", tablename, columnname)
|
||||
|
||||
def constraint_definition(cr, tablename, constraintname):
    """ Return the given constraint's definition, or ``None`` when missing.

    The SQL comment stored on the constraint takes precedence over the
    definition reported by postgres.
    """
    cr.execute("""
        SELECT COALESCE(d.description, pg_get_constraintdef(c.oid))
        FROM pg_constraint c
        JOIN pg_class t ON t.oid = c.conrelid
        LEFT JOIN pg_description d ON c.oid = d.objoid
        WHERE t.relname = %s AND conname = %s;""", (tablename, constraintname))
    if not cr.rowcount:
        return None
    return cr.fetchone()[0]
|
||||
|
||||
def add_constraint(cr, tablename, constraintname, definition):
    """ Add a constraint on the given table.

    The ``definition`` is also stored as a SQL comment on the constraint so
    it can be read back later. Runs inside a savepoint so a failure does not
    poison the surrounding transaction.

    :raises Exception: if the constraint cannot be added
    """
    query1 = 'ALTER TABLE "{}" ADD CONSTRAINT "{}" {}'.format(tablename, constraintname, definition)
    query2 = 'COMMENT ON CONSTRAINT "{}" ON "{}" IS %s'.format(constraintname, tablename)
    try:
        with cr.savepoint(flush=False):
            cr.execute(query1, log_exceptions=False)
            cr.execute(query2, (definition,), log_exceptions=False)
            _schema.debug("Table %r: added constraint %r as %s", tablename, constraintname, definition)
    except Exception as exc:
        # The original passed the format arguments straight to Exception(),
        # so the message placeholders were never interpolated; format the
        # message and chain the root cause.
        raise Exception("Table %r: unable to add constraint %r as %s" % (tablename, constraintname, definition)) from exc
|
||||
|
||||
def drop_constraint(cr, tablename, constraintname):
    """ Drop the given constraint; best-effort (a failure only logs a warning). """
    try:
        with cr.savepoint(flush=False):
            cr.execute(f'ALTER TABLE "{tablename}" DROP CONSTRAINT "{constraintname}"')
            _schema.debug("Table %r: dropped constraint %r", tablename, constraintname)
    except Exception:
        _schema.warning("Table %r: unable to drop constraint %r!", tablename, constraintname)
|
||||
|
||||
def add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
    """ Create the given foreign key, and return ``True``. """
    cr.execute(
        f'ALTER TABLE "{tablename1}" ADD FOREIGN KEY ("{columnname1}") '
        f'REFERENCES "{tablename2}"("{columnname2}") ON DELETE {ondelete}'
    )
    _schema.debug("Table %r: added foreign key %r references %r(%r) ON DELETE %s",
                  tablename1, columnname1, tablename2, columnname2, ondelete)
    return True
|
||||
|
||||
def get_foreign_keys(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
    """ Return the names of the single-column foreign keys from
    ``tablename1.columnname1`` to ``tablename2.columnname2`` having the
    given ON DELETE action.
    """
    cr.execute(
        """
        SELECT fk.conname as name
        FROM pg_constraint AS fk
        JOIN pg_class AS c1 ON fk.conrelid = c1.oid
        JOIN pg_class AS c2 ON fk.confrelid = c2.oid
        JOIN pg_attribute AS a1 ON a1.attrelid = c1.oid AND fk.conkey[1] = a1.attnum
        JOIN pg_attribute AS a2 ON a2.attrelid = c2.oid AND fk.confkey[1] = a2.attnum
        WHERE fk.contype = 'f'
        AND c1.relname = %s
        AND a1.attname = %s
        AND c2.relname = %s
        AND a2.attname = %s
        AND fk.confdeltype = %s
        """, [tablename1, columnname1, tablename2, columnname2, _CONFDELTYPES[ondelete.upper()]]
    )
    return [r[0] for r in cr.fetchall()]
|
||||
|
||||
def fix_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
    """ Update the foreign keys between tables to match the given one, and
    return ``True`` if the given foreign key has been recreated
    (implicitly ``None`` when an existing key already matched).
    """
    # Do not use 'information_schema' here, as those views are awfully slow!
    deltype = _CONFDELTYPES.get(ondelete.upper(), 'a')
    # fetch every single-column foreign key leaving tablename1.columnname1
    query = """ SELECT con.conname, c2.relname, a2.attname, con.confdeltype as deltype
                  FROM pg_constraint as con, pg_class as c1, pg_class as c2,
                       pg_attribute as a1, pg_attribute as a2
                 WHERE con.contype='f' AND con.conrelid=c1.oid AND con.confrelid=c2.oid
                   AND array_lower(con.conkey, 1)=1 AND con.conkey[1]=a1.attnum
                   AND array_lower(con.confkey, 1)=1 AND con.confkey[1]=a2.attnum
                   AND a1.attrelid=c1.oid AND a2.attrelid=c2.oid
                   AND c1.relname=%s AND a1.attname=%s """
    cr.execute(query, (tablename1, columnname1))
    found = False
    for fk in cr.fetchall():
        # keep the first constraint that already matches; drop all others
        if not found and fk[1:] == (tablename2, columnname2, deltype):
            found = True
        else:
            drop_constraint(cr, tablename1, fk[0])
    if not found:
        return add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete)
|
||||
|
||||
def index_exists(cr, indexname):
    """ Return whether the given index exists (as a truthy row count). """
    cr.execute("SELECT 1 FROM pg_indexes WHERE indexname=%s", (indexname,))
    return cr.rowcount

def check_index_exist(cr, indexname):
    """ Assert that index ``indexname`` exists. """
    assert index_exists(cr, indexname), f"{indexname} does not exist"

def create_index(cr, indexname, tablename, expressions, method='btree', where=''):
    """ Create the given index unless it exists. """
    if index_exists(cr, indexname):
        return
    columns = ', '.join(expressions)
    filter_clause = f' WHERE {where}' if where else ''
    cr.execute(f'CREATE INDEX "{indexname}" ON "{tablename}" USING {method} ({columns}){filter_clause}')
    _schema.debug("Table %r: created index %r (%s)", tablename, indexname, columns)

def create_unique_index(cr, indexname, tablename, expressions):
    """ Create the given unique index unless it exists. """
    if index_exists(cr, indexname):
        return
    columns = ', '.join(expressions)
    cr.execute(f'CREATE UNIQUE INDEX "{indexname}" ON "{tablename}" ({columns})')
    _schema.debug("Table %r: created index %r (%s)", tablename, indexname, columns)

def drop_index(cr, indexname, tablename):
    """ Drop the given index if it exists. """
    cr.execute(f'DROP INDEX IF EXISTS "{indexname}"')
    _schema.debug("Table %r: dropped index %r", tablename, indexname)
|
||||
|
||||
def drop_view_if_exists(cr, viewname):
    """ Drop the given view (regular or materialized), and its dependents
    (CASCADE), if it exists; any other relation kind is left untouched.
    """
    kind = table_kind(cr, viewname)
    if kind == 'v':
        cr.execute("DROP VIEW {} CASCADE".format(viewname))
    elif kind == 'm':
        cr.execute("DROP MATERIALIZED VIEW {} CASCADE".format(viewname))
|
||||
|
||||
def escape_psql(to_escape):
    """Backslash-escape PostgreSQL LIKE wildcards (``%``, ``_``) and backslashes."""
    escaped = to_escape.replace('\\', r'\\')
    escaped = escaped.replace('%', r'\%')
    return escaped.replace('_', r'\_')
|
||||
|
||||
def pg_varchar(size=0):
    """ Returns the VARCHAR declaration for the provided size:

    * If no size (or an empty or negative size is provided) return an
      'infinite' VARCHAR
    * Otherwise return a VARCHAR(n)

    :param int size: varchar size, optional
    :rtype: str
    :raises ValueError: if ``size`` is truthy but not an int
    """
    if not size:
        return 'VARCHAR'
    if not isinstance(size, int):
        raise ValueError("VARCHAR parameter should be an int, got %s" % type(size))
    if size > 0:
        return 'VARCHAR(%d)' % size
    return 'VARCHAR'
|
||||
|
||||
def reverse_order(order):
    """ Reverse an ORDER BY clause: every term's direction is flipped
    (a bare column name counts as ascending). The result is lowercased.
    """
    reversed_terms = []
    for term in order.split(','):
        words = term.lower().split()
        new_direction = 'asc' if words[1:] == ['desc'] else 'desc'
        reversed_terms.append('%s %s' % (words[0], new_direction))
    return ', '.join(reversed_terms)
|
||||
|
||||
|
||||
def increment_fields_skiplock(records, *fields):
    """
    Increment 'friendly' the given `fields` of the current `records`.
    If record is locked, we just skip the update.
    It doesn't invalidate the cache since the update is not critical.

    :param records: recordset to update
    :param fields: integer fields to increment
    :returns: whether the specified fields were incremented on any record.
    :rtype: bool
    """
    if not records:
        return False

    # only integer columns can be incremented this way
    for field in fields:
        assert records._fields[field].type == 'integer'

    # FOR UPDATE SKIP LOCKED: rows locked by another transaction are left
    # out instead of blocking; identifiers are quoted via psycopg2.sql
    query = SQL("""
        UPDATE {table}
        SET {sets}
        WHERE id IN (SELECT id FROM {table} WHERE id = ANY(%(ids)s) FOR UPDATE SKIP LOCKED)
    """).format(
        table=Identifier(records._table),
        sets=SQL(', ').join(map(
            SQL('{0} = COALESCE({0}, 0) + 1').format,
            map(Identifier, fields)
        ))
    )

    cr = records._cr
    cr.execute(query, {'ids': records.ids})
    # True iff at least one row was actually updated (i.e. not all locked)
    return bool(cr.rowcount)
|
||||
|
||||
|
||||
def value_to_translated_trigram_pattern(value):
    """ Escape value to match a translated field's trigram index content

    The trigram index function jsonb_path_query_array("column_name", '$.*')::text
    uses all translations' representations to build the indexed text. So the
    original text needs to be JSON-escaped correctly to match it.

    :param str value: value provided in domain
    :return: a pattern to match the indexed text
    """
    if len(value) < 3:
        # matching less than 3 characters cannot use the trigram index
        return '%'

    # JSON-escape the value the same way the index expression does;
    # ensure_ascii=False keeps unicode characters unescaped, consistent
    # with jsonb_path_query_array("column_name", '$.*')::text
    escaped = json.dumps(value, ensure_ascii=False)[1:-1]

    # escape LIKE wildcards and backslashes so they match literally,
    # then surround with wildcards
    escaped = re.sub(r'(_|%|\\)', r'\\\1', escaped)
    return f"%{escaped}%"
|
||||
|
||||
|
||||
def pattern_to_translated_trigram_pattern(pattern):
    """ Escape pattern to match a translated field's trigram index content

    The trigram index function jsonb_path_query_array("column_name", '$.*')::text
    uses all translations' representations to build the indexed text. So the
    original pattern needs to be JSON-escaped correctly to match it.

    :param str pattern: value provided in domain
    :return: a pattern to match the indexed text
    """
    # split the pattern on non-escaped wildcard characters (_, %)
    sub_patterns = re.findall(r'''
        (
            (?:.)*?          # 0 or more characters including the newline character
            (?<!\\)(?:\\\\)*  # 0 or an even number of backslashes, so the next wildcard character is not escaped
        )
        (?:_|%|$)            # a non-escaped wildcard character or end of the string
    ''', pattern, flags=re.VERBOSE | re.DOTALL)

    fragments = []
    for sub_pattern in sub_patterns:
        # undo PG wildcard escaping in the fragment (\% becomes %)
        text = re.sub(r'\\(.|$)', r'\1', sub_pattern, flags=re.DOTALL)
        # fragments with fewer than 3 characters cannot use the trigram index
        if len(text) < 3:
            continue
        # JSON-escape the fragment like the index expression does (" becomes \");
        # ensure_ascii=False keeps unicode characters unescaped
        text = json.dumps(text, ensure_ascii=False)[1:-1]
        # re-apply PG wildcard escaping on the JSON-escaped text (% becomes \%)
        fragments.append(re.sub(r'(_|%|\\)', r'\\\1', text))

    # join the fragments with %, standing in for the original wildcards
    return ("%" + "%".join(fragments) + "%") if fragments else "%"
|
||||
279
odoo-bringout-oca-ocb-base/odoo/tools/template_inheritance.py
Normal file
279
odoo-bringout-oca-ocb-base/odoo/tools/template_inheritance.py
Normal file
|
|
@ -0,0 +1,279 @@
|
|||
|
||||
from lxml import etree
|
||||
from lxml.builder import E
|
||||
import copy
|
||||
import itertools
|
||||
import logging
|
||||
import re
|
||||
|
||||
from odoo.tools.translate import _
|
||||
from odoo.tools import SKIPPED_ELEMENT_TYPES, html_escape
|
||||
from odoo.exceptions import ValidationError
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
RSTRIP_REGEXP = re.compile(r'\n[ \t]*$')
|
||||
|
||||
def add_stripped_items_before(node, spec, extract):
    """ Insert the content (leading text and children) of ``spec`` just
    before ``node``, stripping trailing whitespace at the insertion point.

    :param node: target node; spec content is inserted right before it
    :param spec: inheritance spec element whose text/children are inserted
    :param extract: callable that detaches a position="move" child from the
        source architecture and returns it
    """
    text = spec.text or ''

    # remember the trailing "\n<indent>" (RSTRIP_REGEXP) at the insertion
    # point so it can be re-appended after the inserted content, then merge
    # spec's leading text into the text/tail preceding node
    before_text = ''
    prev = node.getprevious()
    if prev is None:
        parent = node.getparent()
        result = parent.text and RSTRIP_REGEXP.search(parent.text)
        before_text = result.group(0) if result else ''
        parent.text = (parent.text or '').rstrip() + text
    else:
        result = prev.tail and RSTRIP_REGEXP.search(prev.tail)
        before_text = result.group(0) if result else ''
        prev.tail = (prev.tail or '').rstrip() + text

    # restore the saved whitespace after the last inserted item (or in
    # spec.text when spec has no children, keeping indentation consistent)
    if len(spec) > 0:
        spec[-1].tail = (spec[-1].tail or "").rstrip() + before_text
    else:
        spec.text = (spec.text or "").rstrip() + before_text

    # insert spec's children before node; "move" children are first
    # detached from their current location in the source
    for child in spec:
        if child.get('position') == 'move':
            child = extract(child)
        node.addprevious(child)
|
||||
|
||||
|
||||
def add_text_before(node, text):
    """ Add ``text`` just before ``node`` in its XML tree.

    The text lands in the tail of the preceding sibling when there is one;
    otherwise it is appended to the parent's (right-stripped) text.
    """
    if text is None:
        return
    sibling = node.getprevious()
    if sibling is None:
        parent = node.getparent()
        parent.text = (parent.text or "").rstrip() + text
    else:
        sibling.tail = (sibling.tail or "") + text
|
||||
|
||||
|
||||
def remove_element(node):
    """ Remove ``node`` but not its tail, from its XML tree. """
    # re-attach the tail text before the node's position so it survives
    add_text_before(node, node.tail)
    node.tail = None
    node.getparent().remove(node)
|
||||
|
||||
|
||||
def locate_node(arch, spec):
    """ Locate a node in a source (parent) architecture.

    Given a complete source (parent) architecture (i.e. the field
    `arch` in a view), and a 'spec' node (a node in an inheriting
    view that specifies the location in the source view of what
    should be changed), return (if it exists) the node in the
    source view matching the specification.

    :param arch: a parent architecture to modify
    :param spec: a modifying node in an inheriting view
    :return: a node in the source matching the spec, or None
    :raise ValidationError: if an <xpath> spec has an invalid expression
    """
    if spec.tag == 'xpath':
        # explicit XPath expression: return the first matching node
        expr = spec.get('expr')
        try:
            xPath = etree.ETXPath(expr)
        except etree.XPathSyntaxError as e:
            raise ValidationError(_("Invalid Expression while parsing xpath %r", expr)) from e
        nodes = xPath(arch)
        return nodes[0] if nodes else None
    elif spec.tag == 'field':
        # Only compare the field name: a field can be only once in a given view
        # at a given level (and for multilevel expressions, we should use xpath
        # inheritance spec anyway).
        for node in arch.iter('field'):
            if node.get('name') == spec.get('name'):
                return node
        return None

    # fallback: first node with the same tag whose attributes all match the
    # spec's (ignoring the 'position' and 'version' attributes)
    for node in arch.iter(spec.tag):
        if isinstance(node, SKIPPED_ELEMENT_TYPES):
            continue
        if all(node.get(attr) == spec.get(attr) for attr in spec.attrib
               if attr not in ('position', 'version')):
            # Version spec should match parent's root element's version
            if spec.get('version') and spec.get('version') != arch.get('version'):
                return None
            return node
    return None
|
||||
|
||||
|
||||
def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_locate=lambda s: True):
    """ Apply an inheriting view (a descendant of the base view)

    Apply to a source architecture all the spec nodes (i.e. nodes
    describing where and what changes to apply to some parent
    architecture) given by an inheriting view.

    :param Element source: a parent architecture to modify
    :param Element specs_tree: a modifying architecture in an inheriting view
    :param bool inherit_branding: mark modified/removed nodes so branding
        can be distributed afterwards
    :param pre_locate: function that is executed before locating a node.
        This function receives an arch as argument.
        This is required by studio to properly handle group_ids.
    :return: a modified source where the specs are applied
    :rtype: Element
    """
    # Queue of specification nodes (i.e. nodes describing where and
    # changes to apply to some parent architecture).
    specs = specs_tree if isinstance(specs_tree, list) else [specs_tree]

    def extract(spec):
        """
        Utility function that locates a node given a specification, remove
        it from the source and returns it.
        """
        if len(spec):
            raise ValueError(
                _("Invalid specification for moved nodes: %r", etree.tostring(spec, encoding='unicode'))
            )
        pre_locate(spec)
        to_extract = locate_node(source, spec)
        if to_extract is not None:
            remove_element(to_extract)
            return to_extract
        else:
            raise ValueError(
                _("Element %r cannot be located in parent view", etree.tostring(spec, encoding='unicode'))
            )

    while len(specs):
        spec = specs.pop(0)
        if isinstance(spec, SKIPPED_ELEMENT_TYPES):
            continue
        if spec.tag == 'data':
            # flatten <data> wrappers: queue their children instead
            specs += [c for c in spec]
            continue
        pre_locate(spec)
        node = locate_node(source, spec)
        if node is not None:
            pos = spec.get('position', 'inside')
            if pos == 'replace':
                mode = spec.get('mode', 'outer')
                if mode == "outer":
                    # a '$0' placeholder inside the spec stands for a copy of
                    # the node being replaced
                    for loc in spec.xpath(".//*[text()='$0']"):
                        loc.text = ''
                        copied_node = copy.deepcopy(node)
                        # TODO: Remove 'inherit_branding' logic if possible;
                        # currently needed to track node removal for branding
                        # distribution. Avoid marking root nodes to prevent
                        # sibling branding issues.
                        if inherit_branding:
                            copied_node.set('data-oe-no-branding', '1')
                        loc.append(copied_node)
                    if node.getparent() is None:
                        # replacing the root node: the spec's first non-comment
                        # child becomes the new root element
                        spec_content = None
                        comment = None
                        for content in spec:
                            if content.tag is not etree.Comment:
                                spec_content = content
                                break
                            else:
                                comment = content
                        source = copy.deepcopy(spec_content)
                        # only keep the t-name of a template root node
                        t_name = node.get('t-name')
                        if t_name:
                            source.set('t-name', t_name)
                        if comment is not None:
                            # re-attach the leading comment inside the new root
                            text = source.text
                            source.text = None
                            comment.tail = text
                            source.insert(0, comment)
                    else:
                        # TODO ideally the notion of 'inherit_branding' should
                        # not exist in this function. Given the current state of
                        # the code, it is however necessary to know where nodes
                        # were removed when distributing branding. As a stable
                        # fix, this solution was chosen: the location is marked
                        # with a "ProcessingInstruction" which will not impact
                        # the "Element" structure of the resulting tree.
                        # Exception: if we happen to replace a node that already
                        # has xpath branding (root level nodes), do not mark the
                        # location of the removal as it will mess up the branding
                        # of siblings elements coming from other views, after the
                        # branding is distributed (and those processing instructions
                        # removed).
                        if inherit_branding and not node.get('data-oe-xpath'):
                            node.addprevious(etree.ProcessingInstruction('apply-inheritance-specs-node-removal', node.tag))

                        for child in spec:
                            if child.get('position') == 'move':
                                child = extract(child)
                            node.addprevious(child)
                        node.getparent().remove(node)
                elif mode == "inner":
                    # Replace the entire content of an element
                    for child in node:
                        node.remove(child)
                    node.text = None

                    for child in spec:
                        node.append(copy.deepcopy(child))
                    node.text = spec.text

                else:
                    raise ValueError(_("Invalid mode attribute:") + " '%s'" % mode)
            elif pos == 'attributes':
                # each <attribute name="..."> child sets, extends or removes
                # one attribute on the located node
                for child in spec.getiterator('attribute'):
                    attribute = child.get('name')
                    value = child.text or ''
                    if child.get('add') or child.get('remove'):
                        assert not child.text
                        separator = child.get('separator', ',')
                        if separator == ' ':
                            separator = None # squash spaces
                        to_add = (
                            s for s in (s.strip() for s in child.get('add', '').split(separator))
                            if s
                        )
                        to_remove = {s.strip() for s in child.get('remove', '').split(separator)}
                        values = (s.strip() for s in node.get(attribute, '').split(separator))
                        value = (separator or ' ').join(itertools.chain(
                            (v for v in values if v not in to_remove),
                            to_add
                        ))
                    if value:
                        node.set(attribute, value)
                    elif attribute in node.attrib:
                        del node.attrib[attribute]
            elif pos == 'inside':
                # add a sentinel element at the end, insert content of spec
                # before the sentinel, then remove the sentinel element
                sentinel = E.sentinel()
                node.append(sentinel)
                add_stripped_items_before(sentinel, spec, extract)
                remove_element(sentinel)
            elif pos == 'after':
                # add a sentinel element right after node, insert content of
                # spec before the sentinel, then remove the sentinel element
                sentinel = E.sentinel()
                node.addnext(sentinel)
                if node.tail is not None: # for lxml >= 5.1
                    sentinel.tail = node.tail
                    node.tail = None
                add_stripped_items_before(sentinel, spec, extract)
                remove_element(sentinel)
            elif pos == 'before':
                add_stripped_items_before(node, spec, extract)

            else:
                raise ValueError(
                    _("Invalid position attribute: '%s'") %
                    pos
                )

        else:
            # node not found: rebuild a readable representation of the spec
            # for the error message (its 'position' attribute excluded)
            attrs = ''.join([
                ' %s="%s"' % (attr, html_escape(spec.get(attr)))
                for attr in spec.attrib
                if attr != 'position'
            ])
            tag = "<%s%s>" % (spec.tag, attrs)
            raise ValueError(
                _("Element '%s' cannot be located in parent view", tag)
            )

    return source
|
||||
295
odoo-bringout-oca-ocb-base/odoo/tools/test_reports.py
Normal file
295
odoo-bringout-oca-ocb-base/odoo/tools/test_reports.py
Normal file
|
|
@ -0,0 +1,295 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
||||
|
||||
""" Helper functions for reports testing.
|
||||
|
||||
Please /do not/ import this file by default, but only explicitly call it
|
||||
through the code of python tests.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from lxml import etree
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
from .. import api
|
||||
from . import ustr, config
|
||||
from .safe_eval import safe_eval
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
_test_logger = logging.getLogger('odoo.tests')
|
||||
|
||||
|
||||
def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None, report_type=None):
    """ Try to render a report <rname> with contents of ids

    This function should also check for common pitfalls of reports.

    :param cr: database cursor
    :param uid: id of the user rendering the report
    :param str rname: technical name of the report to render
    :param ids: ids of the records to render the report for
    :param data: optional data dict forwarded to the report renderer
    :param context: optional context dict for the environment
    :param our_module: name of the calling module (currently unused here)
    :param report_type: currently unused; kept for backward compatibility
    :return: True when the report rendered correctly, False when the
        produced format could not be examined
    :raise ValueError: if rendering is empty, or a PDF lacks its %PDF- header
    """
    if context is None:
        context = {}
    _test_logger.info("  - Trying %s.create(%r)", rname, ids)

    env = api.Environment(cr, uid, context)

    res_data, res_format = env['ir.actions.report']._render(rname, ids, data=data)

    if not res_data:
        raise ValueError("Report %s produced an empty result!" % rname)

    _logger.debug("Have a %s report for %s, will examine it", res_format, rname)
    if res_format == 'pdf':
        if res_data[:5] != b'%PDF-':
            raise ValueError("Report %s produced a non-pdf header, %r" % (rname, res_data[:10]))
        res_text = False
        try:
            # dump the PDF to a temp file and extract its text with pdftotext
            fd, rfname = tempfile.mkstemp(suffix=res_format)
            os.write(fd, res_data)
            os.close(fd)

            # stderr is not piped, so only stdout carries the extracted text
            proc = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', rfname, '-'], shell=False, stdout=PIPE)
            stdout, stderr = proc.communicate()
            res_text = ustr(stdout)
            os.unlink(rfname)
        except Exception:
            # best effort: pdftotext may not be installed on the test machine
            _logger.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")

        if res_text is not False:
            for line in res_text.split('\n'):
                # unrendered expressions typically show up as '[[ ... ]]'
                if ('[[' in line) or ('[ [' in line):
                    _logger.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
            # TODO more checks, what else can be a sign of a faulty report?
    elif res_format == 'html':
        pass
    else:
        _logger.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
        return False

    _test_logger.info("  + Report %s produced correctly.", rname)
    return True
|
||||
|
||||
def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
                wiz_data=None, wiz_buttons=None,
                context=None, our_module=None):
    """Take an ir.actions.act_window and follow it until a report is produced

    :param cr:
    :param uid:
    :param action_id: the integer id of an action, or a reference to xml id
        of the act_window (can search [our_module.]+xml_id
    :param active_model:
    :param active_ids: call the action as if it had been launched
        from that model+ids (tree/form view action)
    :param wiz_data: a dictionary of values to use in the wizard, if needed.
        They will override (or complete) the default values of the
        wizard form.
    :param wiz_buttons: a list of button names, or button icon strings, which
        should be preferred to press during the wizard.
        Eg. 'OK' or 'fa-print'
    :param context:
    :param our_module: the name of the calling module (string), like 'account'
    :return: True once the chain of actions has been followed to completion
    """
    # derive the module from a dotted xml id when not given explicitly
    if not our_module and isinstance(action_id, str):
        if '.' in action_id:
            our_module = action_id.split('.', 1)[0]

    context = dict(context or {})
    # TODO context fill-up

    env = api.Environment(cr, uid, context)

    def log_test(msg, *args):
        _test_logger.info("  - " + msg, *args)

    datas = {}
    if active_model:
        datas['model'] = active_model
    if active_ids:
        datas['ids'] = active_ids

    if not wiz_buttons:
        wiz_buttons = []

    # resolve action_id (xml id or integer) into (act_model, act_id)
    if isinstance(action_id, str):
        if '.' in action_id:
            _, act_xmlid = action_id.split('.', 1)
        else:
            if not our_module:
                raise ValueError('You cannot only specify action_id "%s" without a module name' % action_id)
            act_xmlid = action_id
            action_id = '%s.%s' % (our_module, action_id)
        action = env.ref(action_id)
        act_model, act_id = action._name, action.id
    else:
        assert isinstance(action_id, int)
        act_model = 'ir.actions.act_window' # assume that
        act_id = action_id
        act_xmlid = '<%s>' % act_id

    def _exec_action(action, datas, env):
        """Emulate the client's handling of one action; the outer loop feeds
        any dict result back in as the next action."""
        # taken from client/modules/action/main.py:84 _exec_action()
        if isinstance(action, bool) or 'type' not in action:
            return
        # Updating the context : Adding the context of action in order to use it on Views called from buttons
        context = dict(env.context)
        if datas.get('id',False):
            context.update( {'active_id': datas.get('id',False), 'active_ids': datas.get('ids',[]), 'active_model': datas.get('model',False)})
        context1 = action.get('context', {})
        if isinstance(context1, str):
            context1 = safe_eval(context1, dict(context))
        context.update(context1)
        env = env(context=context)
        if action['type'] in ['ir.actions.act_window', 'ir.actions.submenu']:
            for key in ('res_id', 'res_model', 'view_mode',
                        'limit', 'search_view', 'search_view_id'):
                datas[key] = action.get(key, datas.get(key, None))

            # determine which view (id and type) the action would open
            view_id = False
            view_type = None
            if action.get('views', []):
                if isinstance(action['views'],list):
                    view_id, view_type = action['views'][0]
                    datas['view_mode']= view_type
                else:
                    if action.get('view_id', False):
                        view_id = action['view_id'][0]
            elif action.get('view_id', False):
                view_id = action['view_id'][0]

            if view_type is None:
                if view_id:
                    view_type = env['ir.ui.view'].browse(view_id).type
                else:
                    view_type = action['view_mode'].split(',')[0]

            assert datas['res_model'], "Cannot use the view without a model"
            # Here, we have a view that we need to emulate
            log_test("will emulate a %s view: %s#%s",
                     view_type, datas['res_model'], view_id or '?')

            model = env[datas['res_model']]
            view_res = model.get_view(view_id, view_type)
            assert view_res and view_res.get('arch'), "Did not return any arch for the view"
            view_data = {}
            arch = etree.fromstring(view_res['arch'])
            # top-level fields only (sub-fields of o2m/m2m archs are excluded)
            fields = [el.get('name') for el in arch.xpath('//field[not(ancestor::field)]')]
            if fields:
                view_data = model.default_get(fields)
            if datas.get('form'):
                view_data.update(datas.get('form'))
            if wiz_data:
                view_data.update(wiz_data)
            _logger.debug("View data is: %r", view_data)

            for fk in fields:
                # Default fields returns list of int, while at create()
                # we need to send a [(6,0,[int,..])]
                if model._fields[fk].type in ('one2many', 'many2many') \
                        and view_data.get(fk, False) \
                        and isinstance(view_data[fk], list) \
                        and not isinstance(view_data[fk][0], tuple) :
                    view_data[fk] = [(6, 0, view_data[fk])]

            # collect the form's buttons, weighting the ones we prefer to press
            action_name = action.get('name')
            try:
                from xml.dom import minidom
                cancel_found = False
                buttons = []
                dom_doc = minidom.parseString(view_res['arch'])
                if not action_name:
                    action_name = dom_doc.documentElement.getAttribute('name')

                for button in dom_doc.getElementsByTagName('button'):
                    button_weight = 0
                    if button.getAttribute('special') == 'cancel':
                        cancel_found = True
                        continue
                    if button.getAttribute('icon') == 'fa-times-circle':
                        cancel_found = True
                        continue
                    if button.getAttribute('default_focus') == '1':
                        button_weight += 20
                    if button.getAttribute('string') in wiz_buttons:
                        button_weight += 30
                    elif button.getAttribute('icon') in wiz_buttons:
                        button_weight += 10
                    string = button.getAttribute('string') or '?%s' % len(buttons)

                    buttons.append({
                        'name': button.getAttribute('name'),
                        'string': string,
                        'type': button.getAttribute('type'),
                        'weight': button_weight,
                    })
            except Exception as e:
                _logger.warning("Cannot resolve the view arch and locate the buttons!", exc_info=True)
                raise AssertionError(e.args[0])

            if not datas['res_id']:
                # it is probably an orm_memory object, we need to create
                # an instance
                datas['res_id'] = env[datas['res_model']].create(view_data).id

            if not buttons:
                raise AssertionError("view form doesn't have any buttons to press!")

            buttons.sort(key=lambda b: b['weight'])
            _logger.debug('Buttons are: %s', ', '.join([ '%s: %d' % (b['string'], b['weight']) for b in buttons]))

            # press buttons from heaviest weight down until one yields a result
            res = None
            while buttons and not res:
                b = buttons.pop()
                log_test("in the \"%s\" form, I will press the \"%s\" button.", action_name, b['string'])
                if not b['type']:
                    log_test("the \"%s\" button has no type, cannot use it", b['string'])
                    continue
                if b['type'] == 'object':
                    #there we are! press the button!
                    rec = env[datas['res_model']].browse(datas['res_id'])
                    func = getattr(rec, b['name'], None)
                    if not func:
                        _logger.error("The %s model doesn't have a %s attribute!", datas['res_model'], b['name'])
                        continue
                    res = func()
                    break
                else:
                    _logger.warning("in the \"%s\" form, the \"%s\" button has unknown type %s",
                                    action_name, b['string'], b['type'])
            return res

        elif action['type']=='ir.actions.report':
            if 'window' in datas:
                del datas['window']
            if not datas:
                datas = action.get('datas')
                if not datas:
                    datas = action.get('data')
            datas = datas.copy()
            ids = datas.get('ids')
            if 'ids' in datas:
                del datas['ids']
            res = try_report(cr, uid, action['report_name'], ids, datas, context, our_module=our_module)
            return res
        else:
            # NOTE(review): message reports act_model, not action['type'] —
            # possibly misleading for integer action ids; confirm intent
            raise Exception("Cannot handle action of type %s" % act_model)

    log_test("will be using %s action %s #%d", act_model, act_xmlid, act_id)
    action = env[act_model].browse(act_id).read()[0]
    assert action, "Could not read action %s[%s]" % (act_model, act_id)
    loop = 0
    while action:
        loop += 1
        # This part tries to emulate the loop of the Gtk client
        if loop > 100:
            _logger.info("Passed %d loops, giving up", loop)
            raise Exception("Too many loops at action")
        log_test("it is an %s action at loop #%d", action.get('type', 'unknown'), loop)
        result = _exec_action(action, datas, env)
        if not isinstance(result, dict):
            break
        datas = result.get('datas', {})
        if datas:
            del result['datas']
        action = result

    return True
|
||||
1725
odoo-bringout-oca-ocb-base/odoo/tools/translate.py
Normal file
1725
odoo-bringout-oca-ocb-base/odoo/tools/translate.py
Normal file
File diff suppressed because it is too large
Load diff
182
odoo-bringout-oca-ocb-base/odoo/tools/view_validation.py
Normal file
182
odoo-bringout-oca-ocb-base/odoo/tools/view_validation.py
Normal file
|
|
@ -0,0 +1,182 @@
|
|||
""" View validation code (using assertions, not the RNG schema). """
|
||||
|
||||
import ast
|
||||
import collections
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
|
||||
from lxml import etree
|
||||
from odoo import tools
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_validators = collections.defaultdict(list)
|
||||
_relaxng_cache = {}
|
||||
|
||||
READONLY = re.compile(r"\breadonly\b")
|
||||
|
||||
|
||||
def _get_attrs_symbols():
|
||||
""" Return a set of predefined symbols for evaluating attrs. """
|
||||
return {
|
||||
'True', 'False', 'None', # those are identifiers in Python 2.7
|
||||
'self',
|
||||
'id',
|
||||
'uid',
|
||||
'context',
|
||||
'context_today',
|
||||
'active_id',
|
||||
'active_ids',
|
||||
'allowed_company_ids',
|
||||
'current_company_id',
|
||||
'active_model',
|
||||
'time',
|
||||
'datetime',
|
||||
'relativedelta',
|
||||
'current_date',
|
||||
'today',
|
||||
'now',
|
||||
'abs',
|
||||
'len',
|
||||
'bool',
|
||||
'float',
|
||||
'str',
|
||||
'unicode',
|
||||
}
|
||||
|
||||
|
||||
def get_variable_names(expr):
    """ Return the subexpressions of the kind "VARNAME(.ATTNAME)*" in the given
    string or AST node.
    """
    ignored = _get_attrs_symbols()
    names = set()

    def dotted_path(node):
        # return the name sequence of a Name/Attribute chain, or a falsy value
        if isinstance(node, ast.Name):
            return [node.id]
        if isinstance(node, ast.Attribute):
            base = dotted_path(node.value)
            return base and base + [node.attr]

    def visit(node):
        path = dotted_path(node)
        if path and path[0] not in ignored:
            names.add('.'.join(path))
        else:
            # not a (non-ignored) dotted name: look inside its children
            for child in ast.iter_child_nodes(node):
                visit(child)

    if isinstance(expr, str):
        expr = ast.parse(expr.strip(), mode='eval').body
    visit(expr)

    return names
|
||||
|
||||
|
||||
def get_dict_asts(expr):
    """ Check that the given string or AST node represents a dict expression
    where all keys are string literals, and return it as a dict mapping string
    keys to the AST of values.

    :param expr: a string to parse, or an already-parsed AST node
    :return: dict mapping each literal key to the AST node of its value
    :raise ValueError: if the expression is not a dict, or a key is not a
        string literal (this includes ``**`` expansions, whose key is None)
    """
    if isinstance(expr, str):
        expr = ast.parse(expr.strip(), mode='eval').body

    if not isinstance(expr, ast.Dict):
        raise ValueError("Non-dict expression")
    # string literals are ast.Constant nodes since Python 3.8; the former
    # ast.Str alias is deprecated and slated for removal
    if not all(isinstance(key, ast.Constant) and isinstance(key.value, str) for key in expr.keys):
        raise ValueError("Non-string literal dict key")
    return {key.value: val for key, val in zip(expr.keys, expr.values)}
|
||||
|
||||
|
||||
def _check(condition, explanation):
|
||||
if not condition:
|
||||
raise ValueError("Expression is not a valid domain: %s" % explanation)
|
||||
|
||||
|
||||
def get_domain_identifiers(expr):
    """ Check that the given string or AST node represents a domain expression,
    and return a pair of sets ``(fields, vars)`` where ``fields`` are the field
    names on the left-hand side of conditions, and ``vars`` are the variable
    names on the right-hand side of conditions.

    :param expr: domain as a string to parse, or an already-parsed AST node
    :raise ValueError: (via ``_check``) when the expression is not a valid domain
    """
    def is_str(node):
        # string literals are ast.Constant since Python 3.8; the former
        # ast.Str alias is deprecated and slated for removal
        return isinstance(node, ast.Constant) and isinstance(node.value, str)

    if not expr:  # case of expr=""
        return (set(), set())
    if isinstance(expr, str):
        expr = ast.parse(expr.strip(), mode='eval').body

    fnames = set()
    vnames = set()

    if isinstance(expr, ast.List):
        for elem in expr.elts:
            if is_str(elem):
                # note: this doesn't check the and/or structure
                _check(elem.value in ('&', '|', '!'),
                       f"logical operators should be '&', '|', or '!', found {elem.value!r}")
                continue

            if not isinstance(elem, (ast.List, ast.Tuple)):
                continue

            _check(len(elem.elts) == 3,
                   f"segments should have 3 elements, found {len(elem.elts)}")
            lhs, operator, rhs = elem.elts
            _check(is_str(operator),
                   f"operator should be a string, found {type(operator).__name__}")
            if is_str(lhs):
                fnames.add(lhs.value)

    # right-hand sides (and dynamic left-hand sides) contribute variable names
    vnames.update(get_variable_names(expr))

    return (fnames, vnames)
|
||||
|
||||
|
||||
def valid_view(arch, **kwargs):
    """ Run every validator registered for ``arch.tag`` against ``arch``.

    :return: False on the first hard failure, "Warning" on the first soft
        failure, True when all validators pass
    """
    for validator in _validators[arch.tag]:
        outcome = validator(arch, **kwargs)
        if not outcome:
            _logger.error("Invalid XML: %s", validator.__doc__)
            return False
        if outcome == "Warning":
            _logger.warning("Invalid XML: %s", validator.__doc__)
            return "Warning"
    return True
|
||||
|
||||
|
||||
def validate(*view_types):
    """ Registers a view-validation function for the specific view types
    """
    def decorator(fn):
        # record the function under every requested view type
        for view_type in view_types:
            _validators[view_type].append(fn)
        return fn
    return decorator
|
||||
|
||||
|
||||
def relaxng(view_type):
    """ Return a validator for the given view type, or None. """
    # validators are built lazily and memoized per view type; a load failure
    # is cached as None so it is only logged once
    if view_type not in _relaxng_cache:
        with tools.file_open(os.path.join('base', 'rng', '%s_view.rng' % view_type)) as frng:
            try:
                relaxng_doc = etree.parse(frng)
                _relaxng_cache[view_type] = etree.RelaxNG(relaxng_doc)
            except Exception:
                _logger.exception('Failed to load RelaxNG XML schema for views validation')
                _relaxng_cache[view_type] = None
    return _relaxng_cache[view_type]
|
||||
|
||||
|
||||
@validate('calendar', 'graph', 'pivot', 'search', 'tree', 'activity')
def schema_valid(arch, **kwargs):
    """ Get RNG validator and validate RNG file."""
    checker = relaxng(arch.tag)
    if not checker or checker.validate(arch):
        # no schema available for this view type, or the arch conforms to it
        return True
    # report every schema violation; any logged error makes the check fail
    outcome = True
    for error in checker.error_log:
        _logger.error(tools.ustr(error))
        outcome = False
    return outcome
|
||||
149
odoo-bringout-oca-ocb-base/odoo/tools/which.py
Executable file
149
odoo-bringout-oca-ocb-base/odoo/tools/which.py
Executable file
|
|
@ -0,0 +1,149 @@
|
|||
#!/usr/bin/env python
|
||||
""" Which - locate a command
|
||||
|
||||
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
|
||||
* see http://bugs.python.org/issue444582
|
||||
* uses ``PATHEXT`` on Windows
|
||||
* searches current directory before ``PATH`` on Windows,
|
||||
but not before an explicitly passed path
|
||||
* accepts both string or iterable for an explicitly passed path, or pathext
|
||||
* accepts an explicitly passed empty path, or pathext (either '' or [])
|
||||
* does not search ``PATH`` for files that have a path specified in their name already
|
||||
* moved defpath and defpathext lists initialization to module level,
|
||||
instead of initializing them on each function call
|
||||
* changed interface: which_files() returns generator, which() returns first match,
|
||||
or raises IOError(errno.ENOENT)
|
||||
|
||||
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
|
||||
|
||||
Return a generator which yields full paths in which the *file* name exists
|
||||
in a directory that is part of the file name, or on *path*,
|
||||
and has the given *mode*.
|
||||
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
|
||||
existing executable file.
|
||||
The *path* is, by default, the ``PATH`` variable on the platform,
|
||||
or the string/iterable passed in as *path*.
|
||||
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
|
||||
On Windows, a current directory is searched before using the ``PATH`` variable,
|
||||
but not before an explicitly passed *path*.
|
||||
The *pathext* is only used on Windows to match files with given extensions appended as well.
|
||||
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
|
||||
In the event that a ``PATHEXT`` variable is not found,
|
||||
default value for Windows XP/Vista is used.
|
||||
The command is always searched without extension first,
|
||||
even when *pathext* is explicitly passed.
|
||||
|
||||
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
|
||||
Return first match generated by which_files(file, mode, path, pathext),
|
||||
or raise IOError(errno.ENOENT).
|
||||
|
||||
"""
|
||||
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()

import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join
# errno code for "No such file or directory", raised when no match is found
ENOENT = 2

# whether we are running on a Windows platform
windows = sys.platform.startswith('win')

# default search path: PATH environment variable, falling back to os.defpath
defpath = environ.get('PATH', defpath).split(pathsep)

if windows:
    defpath.insert(0, '.') # can insert without checking, when duplicates are removed
    # given the quite usual mess in PATH on Windows, let's rather remove duplicates
    seen = set()
    defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
    del seen

    # extensions tried when matching commands; '' first so the bare name wins
    defpathext = [''] + environ.get('PATHEXT',
        '.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
    defpathext = ['']
|
||||
|
||||
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
    """ Locate a file in a path supplied as a part of the file name,
    or the user's path, or a supplied path.
    The function yields full paths (not necessarily absolute paths),
    in which the given file name matches an existing file in a directory on the path.

    >>> def test_which(expected, *args, **argd):
    ...     result = list(which_files(*args, **argd))
    ...     assert result == expected, 'which_files: %s != %s' % (result, expected)
    ...
    ...     try:
    ...         result = [ which(*args, **argd) ]
    ...     except IOError:
    ...         result = []
    ...     assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])

    >>> if windows: cmd = environ['COMSPEC']
    >>> if windows: test_which([cmd], 'cmd')
    >>> if windows: test_which([cmd], 'cmd.exe')
    >>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
    >>> if windows: test_which([cmd], 'cmd', pathext='.exe')
    >>> if windows: test_which([cmd], cmd)
    >>> if windows: test_which([cmd], cmd, path='<nonexistent>')
    >>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
    >>> if windows: test_which([cmd], cmd[:-4])
    >>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')

    >>> if windows: test_which([], 'cmd', path='<nonexistent>')
    >>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
    >>> if windows: test_which([], '<nonexistent>/cmd')
    >>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')

    >>> if not windows: sh = '/bin/sh'
    >>> if not windows: test_which([sh], 'sh')
    >>> if not windows: test_which([sh], 'sh', path=dirname(sh))
    >>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
    >>> if not windows: test_which([sh], sh)
    >>> if not windows: test_which([sh], sh, path='<nonexistent>')
    >>> if not windows: test_which([sh], sh, pathext='<nonexistent>')

    >>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you?
    >>> if not windows: test_which([], 'sh', path='<nonexistent>')
    >>> if not windows: test_which([], '<nonexistent>/sh')
    """
    # If the file name carries a directory part, only that directory is searched.
    filepath, file = split(file)

    if filepath:
        path = (filepath,)
    elif path is None:
        path = defpath
    elif isinstance(path, str):
        path = path.split(pathsep)

    if pathext is None:
        pathext = defpathext
    elif isinstance(pathext, str):
        pathext = pathext.split(pathsep)

    if '' not in pathext:
        # Always check the command without extension first, even for a custom
        # pathext.  Build a fresh list instead of insert(0, '') so that a
        # caller-supplied list is never mutated (the previous implementation
        # modified the caller's list in place).
        pathext = [''] + list(pathext)

    for dir in path:
        basepath = join(dir, file)
        for ext in pathext:
            fullpath = basepath + ext
            if exists(fullpath) and access(fullpath, mode):
                yield fullpath
|
||||
|
||||
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
    """ Locate a file in a path supplied as a part of the file name,
    or the user's path, or a supplied path.
    The function returns full path (not necessarily absolute path),
    in which the given file name matches an existing file in a directory on the path,
    or raises IOError(errno.ENOENT).

    >>> # for doctest see which_files()
    """
    # Return the first hit produced by which_files(), if any.
    for found in which_files(file, mode, path, pathext):
        return found
    what = 'command' if mode & X_OK else 'file'
    raise IOError(ENOENT, '%s not found' % what, file)
|
||||
|
||||
if __name__ == '__main__':
    # Self-test: run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
|
||||
30
odoo-bringout-oca-ocb-base/odoo/tools/win32.py
Normal file
30
odoo-bringout-oca-ocb-base/odoo/tools/win32.py
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

import locale
import time
import datetime

# Some platforms (notably Windows) ship a locale module without these
# constants; provide stand-in values so callers can still pass them to
# locale.nl_langinfo() below.
if not hasattr(locale, 'D_FMT'):
    locale.D_FMT = 1

if not hasattr(locale, 'T_FMT'):
    locale.T_FMT = 2

if not hasattr(locale, 'nl_langinfo'):
    def nl_langinfo(param):
        # Reconstruct the locale's date/time format string by formatting a
        # known date/time with strftime and substituting each rendered
        # component back with its strftime directive.
        if param == locale.D_FMT:
            parsed = time.strptime('30/12/2004', '%d/%m/%Y')
            sample = datetime.datetime(*parsed[:-2])
            format_date = sample.strftime('%x')
            # NOTE(review): '04' covers locales rendering a two-digit year,
            # yet it maps to '%Y' (not '%y') — preserved from the original.
            for rendered, directive in [('30', '%d'), ('12', '%m'), ('2004', '%Y'), ('04', '%Y')]:
                format_date = format_date.replace(rendered, directive)
            return format_date
        if param == locale.T_FMT:
            parsed = time.strptime('13:24:56', '%H:%M:%S')
            sample = datetime.datetime(*parsed[:-2])
            format_time = sample.strftime('%X')
            for rendered, directive in [('13', '%H'), ('24', '%M'), ('56', '%S')]:
                format_time = format_time.replace(rendered, directive)
            return format_time
    locale.nl_langinfo = nl_langinfo
|
||||
304
odoo-bringout-oca-ocb-base/odoo/tools/xml_utils.py
Normal file
304
odoo-bringout-oca-ocb-base/odoo/tools/xml_utils.py
Normal file
|
|
@ -0,0 +1,304 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Utilities for generating, parsing and checking XML/XSD files on top of the lxml.etree module."""
|
||||
|
||||
import logging
|
||||
import re
|
||||
import requests
|
||||
import zipfile
|
||||
from io import BytesIO
|
||||
from lxml import etree
|
||||
import contextlib
|
||||
|
||||
from odoo.exceptions import UserError
|
||||
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Matches any character outside the XML 1.0 "Char" production; compiled once
# at import time because this runs on every XML payload we sanitize.
_XML_INVALID_CHARS_RE = re.compile(
    '[^'
    '\u0009'
    '\u000A'
    '\u000D'
    '\u0020-\uD7FF'
    '\uE000-\uFFFD'
    '\U00010000-\U0010FFFF'
    ']'.encode()
)


def remove_control_characters(byte_node):
    """Remove characters that are not allowed in XML documents.

    The characters to be escaped are the control characters #x0 to #x1F and #x7F (most of which cannot appear in XML)
    [...] XML processors must accept any character in the range specified for Char:
    `Char :: = #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]`
    source: https://www.w3.org/TR/xml/

    :param bytes byte_node: raw XML content to sanitize
    :return: the input with XML-invalid characters stripped out
    :rtype: bytes
    """
    return _XML_INVALID_CHARS_RE.sub(b'', byte_node)
|
||||
|
||||
|
||||
class odoo_resolver(etree.Resolver):
    """Odoo specific file resolver that can be added to the XML Parser.

    It will search filenames in the ir.attachments
    """

    def __init__(self, env, prefix):
        super().__init__()
        self.env = env
        self.prefix = prefix

    def resolve(self, url, id, context):
        """Search url in ``ir.attachment`` and return the resolved content."""
        # Attachments may be namespaced by a module prefix, e.g. 'l10n_cl_edi.<url>'.
        name = url if not self.prefix else f'{self.prefix}.{url}'
        record = self.env['ir.attachment'].search([('name', '=', name)])
        if not record:
            # Fall through to lxml's default resolution behaviour.
            return None
        return self.resolve_string(record.raw, context)
|
||||
|
||||
|
||||
def _check_with_xsd(tree_or_str, stream, env=None, prefix=None):
    """Check an XML against an XSD schema.

    This will raise a UserError if the XML file is not valid according to the
    XSD file.

    :param str | etree._Element tree_or_str: representation of the tree to be checked
    :param io.IOBase | str stream: the byte stream used to build the XSD schema.
        If env is given, it can also be the name of an attachment in the filestore
    :param odoo.api.Environment env: If it is given, it enables resolving the
        imports of the schema in the filestore with ir.attachments.
    :param str prefix: if given, provides a prefix to try when
        resolving the imports of the schema. e.g. prefix='l10n_cl_edi' will
        enable 'SiiTypes_v10.xsd' to be resolved to 'l10n_cl_edi.SiiTypes_v10.xsd'.
    """
    # Normalize the input to an element tree.
    if isinstance(tree_or_str, etree._Element):
        tree = tree_or_str
    else:
        tree = etree.fromstring(tree_or_str)

    xml_parser = etree.XMLParser()
    if env:
        # Resolve schema imports through ir.attachment records.
        xml_parser.resolvers.add(odoo_resolver(env, prefix))
        if isinstance(stream, str) and stream.endswith('.xsd'):
            attachment = env['ir.attachment'].search([('name', '=', stream)])
            if not attachment:
                raise FileNotFoundError()
            stream = BytesIO(attachment.raw)

    schema = etree.XMLSchema(etree.parse(stream, parser=xml_parser))
    try:
        schema.assertValid(tree)
    except etree.DocumentInvalid as xml_errors:
        raise UserError('\n'.join(str(e) for e in xml_errors.error_log))
|
||||
|
||||
|
||||
def create_xml_node_chain(first_parent_node, nodes_list, last_node_value=None):
    """Generate a hierarchical chain of nodes.

    Each new node being the child of the previous one based on the tags contained
    in `nodes_list`, under the given node `first_parent_node`.

    :param etree._Element first_parent_node: parent of the created tree/chain
    :param iterable[str] nodes_list: tag names to be created
    :param str last_node_value: if specified, set the last node's text to this value
    :returns: the list of created nodes
    :rtype: list[etree._Element]
    """
    created = []
    current = first_parent_node
    for tag in nodes_list:
        current = etree.SubElement(current, tag)
        created.append(current)

    # `current` is the deepest created node, or first_parent_node itself when
    # nodes_list is empty.
    if last_node_value is not None:
        current.text = last_node_value
    return created
|
||||
|
||||
|
||||
def create_xml_node(parent_node, node_name, node_value=None):
    """Create a new node.

    :param etree._Element parent_node: parent of the created node
    :param str node_name: name of the created node
    :param str node_value: value of the created node (optional)
    :rtype: etree._Element
    """
    # A single-element chain: the one created node is the first (and only) entry.
    nodes = create_xml_node_chain(parent_node, [node_name], node_value)
    return nodes[0]
|
||||
|
||||
|
||||
def cleanup_xml_node(xml_node_or_string, remove_blank_text=True, remove_blank_nodes=True, indent_level=0, indent_space=" "):
    """Clean up the sub-tree of the provided XML node.

    If the provided XML node is of type:
    - etree._Element, it is modified in-place.
    - string/bytes, it is first parsed into an etree._Element
    :param xml_node_or_string (etree._Element, str): XML node (or its string/bytes representation)
    :param remove_blank_text (bool): if True, removes whitespace-only text from nodes
    :param remove_blank_nodes (bool): if True, removes leaf nodes with no text (iterative, depth-first, done after remove_blank_text)
    :param indent_level (int): depth or level of node within root tree (use -1 to leave indentation as-is)
    :param indent_space (str): string to use for indentation (use '' to remove all indentation)
    :returns (etree._Element): clean node, same instance that was received (if applicable)
    """
    xml_node = xml_node_or_string

    # Normalize str/bytes input into an etree._Element.
    if isinstance(xml_node, str):
        xml_node = xml_node.encode()  # misnomer: fromstring actually reads bytes
    if isinstance(xml_node, bytes):
        xml_node = etree.fromstring(remove_control_characters(xml_node))

    # Walk depth-first so that an inner node can itself become a leaf (and be
    # removed) once all of its blank children have been removed.
    def leaf_iter(parent_node, node, level):
        for child in node:
            leaf_iter(node, child, level if level < 0 else level + 1)

        # Indentation (skipped entirely when level is negative)
        if level >= 0:
            indent = '\n' + indent_space * level
            if not node.tail or not node.tail.strip():
                node.tail = '\n' if parent_node is None else indent
            if len(node) > 0:
                if not node.text or not node.text.strip():
                    # First child's indentation is parent's text
                    node.text = indent + indent_space
                last_child = node[-1]
                if last_child.tail == indent + indent_space:
                    # Last child's tail is parent's closing tag indentation
                    last_child.tail = indent

        # Removal condition: node is leaf (not root nor inner node)
        if parent_node is not None and len(node) == 0:
            if remove_blank_text and node.text is not None and not node.text.strip():
                # node.text is None iff node.tag is self-closing (text='' creates closing tag)
                node.text = ''
            if remove_blank_nodes and not (node.text or ''):
                parent_node.remove(node)

    leaf_iter(None, xml_node, indent_level)
    return xml_node
|
||||
|
||||
|
||||
def _save_xsd_attachment(env, name, content):
    """Create, or update in place, the ir.attachment holding one XSD file.

    :param odoo.api.Environment env: environment used to access ir.attachment
    :param str name: (possibly prefixed) attachment name
    :param bytes content: XSD file content
    :return: the created or updated attachment record
    """
    attachment = env['ir.attachment'].search([('name', '=', name)], limit=1)
    if attachment:
        _logger.info("Updating the content of ir.attachment with name: %s", name)
        attachment.raw = content
    else:
        _logger.info("Saving XSD file as ir.attachment, with name: %s", name)
        attachment = env['ir.attachment'].create({
            'name': name,
            'raw': content,
            'public': True,
        })
    return attachment


def load_xsd_files_from_url(env, url, file_name=None, force_reload=False,
                            request_max_timeout=10, xsd_name_prefix='', xsd_names_filter=None, modify_xsd_content=None):
    """Load XSD file or ZIP archive. Save XSD files as ir.attachment.

    An XSD attachment is saved as {xsd_name_prefix}.{file_name} where the filename is either the filename obtained
    from the URL or from the ZIP archive, or the `file_name` param if it is specified and a single XSD is being downloaded.
    A typical prefix is the calling module's name.

    For ZIP archives, XSD files inside it will be saved as attachments, depending on the provided list of XSD names.
    ZIP archive themselves are not saved.

    The XSD files content can be modified by providing the `modify_xsd_content` function as argument.
    Typically, this is used when XSD files depend on each other (with the schemaLocation attribute),
    but it can be used for any purpose.

    :param odoo.api.Environment env: environment of calling module
    :param str url: URL of XSD file/ZIP archive
    :param str file_name: used as attachment name if the URL leads to a single XSD, otherwise ignored
    :param bool force_reload: Deprecated.
    :param int request_max_timeout: maximum time (in seconds) before the request times out
    :param str xsd_name_prefix: if provided, will be added as a prefix to every XSD file name
    :param list | str xsd_names_filter: if provided, will only save the XSD files with these names
    :param func modify_xsd_content: function that takes the xsd content as argument and returns a modified version of it
    :rtype: odoo.api.ir.attachment | bool
    :return: every XSD attachment created/fetched or False if an error occurred (see warning logs)
    """
    try:
        _logger.info("Fetching file/archive from given URL: %s", url)
        response = requests.get(url, timeout=request_max_timeout)
        response.raise_for_status()
    except requests.exceptions.HTTPError as error:
        _logger.warning('HTTP error: %s with the given URL: %s', error, url)
        return False
    except requests.exceptions.ConnectionError as error:
        _logger.warning('Connection error: %s with the given URL: %s', error, url)
        return False
    except requests.exceptions.Timeout as error:
        _logger.warning('Request timeout: %s with the given URL: %s', error, url)
        return False

    content = response.content
    if not content:
        _logger.warning("The HTTP response from %s is empty (no content)", url)
        return False

    # Probe whether the payload is a ZIP archive; anything else is treated as
    # a single XSD file.
    archive = None
    with contextlib.suppress(zipfile.BadZipFile):
        archive = zipfile.ZipFile(BytesIO(content))

    if archive is None:
        # Single XSD file.
        if modify_xsd_content:
            content = modify_xsd_content(content)
        if not file_name:
            file_name = f"{url.split('/')[-1]}"
            _logger.info("XSD name not provided, defaulting to %s", file_name)

        prefixed_xsd_name = f"{xsd_name_prefix}.{file_name}" if xsd_name_prefix else file_name
        return _save_xsd_attachment(env, prefixed_xsd_name, content)

    # ZIP archive: save each (filtered) contained XSD; the archive itself is not saved.
    saved_attachments = env['ir.attachment']
    for file_path in archive.namelist():
        if not file_path.endswith('.xsd'):
            continue

        file_name = file_path.rsplit('/', 1)[-1]

        if xsd_names_filter and file_name not in xsd_names_filter:
            _logger.info("Skipping file with name %s in ZIP archive", file_name)
            continue

        try:
            content = archive.read(file_path)
        except KeyError:
            _logger.warning("Failed to retrieve XSD file with name %s from ZIP archive", file_name)
            continue
        if modify_xsd_content:
            content = modify_xsd_content(content)

        prefixed_xsd_name = f"{xsd_name_prefix}.{file_name}" if xsd_name_prefix else file_name
        saved_attachments |= _save_xsd_attachment(env, prefixed_xsd_name, content)

    return saved_attachments
|
||||
|
||||
|
||||
def validate_xml_from_attachment(env, xml_content, xsd_name, reload_files_function=None, prefix=None):
    """Try and validate the XML content with an XSD attachment.
    If the XSD attachment cannot be found in database, skip validation without raising.
    If the skip_xsd context key is truthy, skip validation.

    :param odoo.api.Environment env: environment of calling module
    :param xml_content: the XML content to validate
    :param xsd_name: the XSD file name in database
    :param reload_files_function: Deprecated.
    :return: the result of the function :func:`odoo.tools.xml_utils._check_with_xsd`
    """
    if env.context.get('skip_xsd', False):
        return

    qualified_name = f"{prefix}.{xsd_name}" if prefix else xsd_name
    try:
        _logger.info("Validating with XSD...")
        _check_with_xsd(xml_content, qualified_name, env, prefix)
        _logger.info("XSD validation successful!")
    except FileNotFoundError:
        # The schema is simply not installed: validation is best-effort.
        _logger.info("XSD file not found, skipping validation")
|
||||
8
odoo-bringout-oca-ocb-base/odoo/tools/zeep/__init__.py
Normal file
8
odoo-bringout-oca-ocb-base/odoo/tools/zeep/__init__.py
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
from zeep.transports import Transport
|
||||
from zeep.plugins import Plugin
|
||||
from zeep.settings import Settings
|
||||
|
||||
from . import exceptions
|
||||
from . import ns
|
||||
from . import wsdl
|
||||
from .client import Client
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
185
odoo-bringout-oca-ocb-base/odoo/tools/zeep/client.py
Normal file
185
odoo-bringout-oca-ocb-base/odoo/tools/zeep/client.py
Normal file
|
|
@ -0,0 +1,185 @@
|
|||
import zeep
|
||||
|
||||
from decimal import Decimal
|
||||
from datetime import date, datetime, timedelta
|
||||
from requests import Response
|
||||
from types import SimpleNamespace, FunctionType
|
||||
|
||||
|
||||
# Default timeout (seconds) for both WSDL loading and SOAP operations.
TIMEOUT = 30

# The only types Client.__serialize_object passes through unchanged; lists,
# dicts and zeep CompoundValues are converted, anything else is rejected.
SERIALIZABLE_TYPES = (
    type(None), bool, int, float, str, bytes, tuple, list, dict,
    Decimal, date, datetime, timedelta, Response,
)
|
||||
|
||||
|
||||
class Client:
    """A wrapper for Zeep.Client

    * providing a simpler API to pass timeouts and session,
    * restricting its attributes to a few, most-commonly used accross Odoo's modules,
    * serializing the returned values of its methods.
    """
    def __init__(self, *args, **kwargs):
        transport = kwargs.setdefault('transport', zeep.Transport())
        # The timeout for loading wsdl and xsd documents.
        transport.load_timeout = kwargs.pop('timeout', None) or transport.load_timeout or TIMEOUT
        # The timeout for operations (POST/GET)
        transport.operation_timeout = kwargs.pop('operation_timeout', None) or transport.operation_timeout or TIMEOUT
        # The `requests.session` used for HTTP requests
        transport.session = kwargs.pop('session', None) or transport.session

        self.__obj = zeep.Client(*args, **kwargs)
        self.__service = None

    @classmethod
    def __serialize_object(cls, obj):
        # Recursively convert zeep results (lists, dicts, CompoundValues) into
        # plain values and SerialProxy namespaces.
        if isinstance(obj, list):
            return [cls.__serialize_object(item) for item in obj]
        if isinstance(obj, (dict, zeep.xsd.valueobjects.CompoundValue)):
            return SerialProxy(**{key: cls.__serialize_object(obj[key]) for key in obj})
        if type(obj) in SERIALIZABLE_TYPES:
            return obj
        raise ValueError(f'{obj} is not serializable')

    @classmethod
    def __serialize_object_wrapper(cls, method):
        # Wrap a zeep operation/type factory so its result is serialized.
        def wrapper(*args, **kwargs):
            return cls.__serialize_object(method(*args, **kwargs))
        return wrapper

    @property
    def service(self):
        # Lazily built, cached namespace over the default service's operations.
        if not self.__service:
            self.__service = ReadOnlyMethodNamespace(**{
                name: self.__serialize_object_wrapper(operation)
                for name, operation in self.__obj.service._operations.items()
            })
        return self.__service

    def type_factory(self, namespace):
        schema = self.__obj.wsdl.types
        # Accept either a namespace prefix or a full namespace URI.
        if namespace not in schema.namespaces:
            namespace = schema.get_ns_prefix(namespace)
        documents = schema.documents.get_by_namespace(namespace, fail_silently=True)
        prefix_length = len(f'{{{namespace}}}')
        factories = {}
        for document in documents:
            for qualified_name, type_ in document._types.items():
                # Strip the '{namespace}' qualifier from the type name.
                factories[qualified_name[prefix_length:]] = type_
        return ReadOnlyMethodNamespace(**{
            name: self.__serialize_object_wrapper(type_)
            for name, type_ in factories.items()
        })

    def get_type(self, name):
        return self.__serialize_object_wrapper(self.__obj.wsdl.types.get_type(name))

    def create_service(self, binding_name, address):
        service = self.__obj.create_service(binding_name, address)
        return ReadOnlyMethodNamespace(**{
            name: self.__serialize_object_wrapper(operation)
            for name, operation in service._operations.items()
        })

    def bind(self, service_name, port_name):
        service = self.__obj.bind(service_name, port_name)
        operations = {
            name: self.__serialize_object_wrapper(operation)
            for name, operation in service._operations.items()
        }
        # Binding options (e.g. endpoint address) are exposed unchanged.
        operations['_binding_options'] = service._binding_options
        return ReadOnlyMethodNamespace(**operations)
|
||||
|
||||
|
||||
class ReadOnlyMethodNamespace(SimpleNamespace):
    """A read-only attribute-based namespace not prefixed by `_` and restricted to functions.

    By default, `types.SympleNamespace` doesn't implement `__setitem__` and `__delitem__`,
    no need to implement them to ensure the read-only property of this class.
    """
    def __init__(self, **kwargs):
        def acceptable(key, value):
            # Only the special '_binding_options' dict may use a leading underscore.
            if key == '_binding_options':
                return isinstance(value, dict)
            return not key.startswith('_') and isinstance(value, FunctionType)

        assert all(acceptable(key, value) for key, value in kwargs.items())
        # SimpleNamespace fills __dict__ directly, bypassing our __setattr__.
        super().__init__(**kwargs)

    def __getitem__(self, key):
        return self.__dict__[key]

    def __setattr__(self, key, value):
        raise NotImplementedError

    def __delattr__(self, key):
        raise NotImplementedError
|
||||
|
||||
|
||||
class SerialProxy(SimpleNamespace):
    """An attribute-based namespace not prefixed by `_` and restricted to few types.

    It pretends to be a zeep `CompoundValue` so zeep.helpers.serialize_object threats it as such.

    `__getitem__` and `__delitem__` are supported, but `__setitem__` is prevented,
    e.g.
    ```py
    proxy = SerialProxy(foo='foo')
    proxy.foo  # Allowed
    proxy['foo']  # Allowed
    proxy.foo = 'bar'  # Allowed
    proxy['foo'] = 'bar'  # Prevented
    del proxy.foo  # Allowed
    del proxy['foo']  # Allowed
    ```
    """

    # Pretend to be a CompoundValue so zeep can serialize this when sending a request with this object in the payload
    # https://stackoverflow.com/a/42958013
    # https://github.com/mvantellingen/python-zeep/blob/a65b4363c48b5c3f687b8df570bcbada8ba66b9b/src/zeep/helpers.py#L15
    @property
    def __class__(self):
        return zeep.xsd.valueobjects.CompoundValue

    def __init__(self, **kwargs):
        # Validate every initial attribute before storing anything.
        for name, value in kwargs.items():
            self.__check(name, value)
        super().__init__(**kwargs)

    def __setattr__(self, key, value):
        self.__check(key, value)
        return super().__setattr__(key, value)

    def __getitem__(self, key):
        self.__check(key, None)
        return self.__getattribute__(key)

    # Not required as SimpleNamespace doesn't implement it by default, but this makes it explicit.
    def __setitem__(self, key, value):
        raise NotImplementedError

    def __delitem__(self, key):
        self.__check(key, None)
        self.__delattr__(key)

    def __iter__(self):
        return iter(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)

    def __str__(self):
        return str(self.__dict__)

    def keys(self):
        return self.__dict__.keys()

    def values(self):
        return self.__dict__.values()

    def items(self):
        return self.__dict__.items()

    @classmethod
    def __check(cls, key, value):
        # Reject private-looking names and non-serializable payloads; nested
        # SerialProxy instances are explicitly allowed.
        assert not key.startswith('_')
        assert type(value) in SERIALIZABLE_TYPES + (SerialProxy,)
|
||||
1
odoo-bringout-oca-ocb-base/odoo/tools/zeep/exceptions.py
Normal file
1
odoo-bringout-oca-ocb-base/odoo/tools/zeep/exceptions.py
Normal file
|
|
@ -0,0 +1 @@
|
|||
from zeep.exceptions import *
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue