19.0 vanilla

This commit is contained in:
Ernad Husremovic 2025-10-03 18:07:25 +02:00
parent 0a7ae8db93
commit 991d2234ca
416 changed files with 646602 additions and 300844 deletions

View file

@ -0,0 +1,20 @@
"""The implementation of the ORM.
A `Registry` object is instantiated per database, and exposes all the available
models for its database. The available models are determined by the modules
that must be loaded for the given database.
The `decorators` defines various method decorators.
The 'environments` defines `Transaction`, collecting database
transaction-specific data, and `Environment`, which contains specific
context-dependent data inside a transaction.
The `fields` file defines the base class of fields for models.
After loading it, you may load scalar fields.
Finally, `models` provides the base classes for defining models.
You may now define relational fields.
We export the needed features in various packages and developers should not
import directly from here.
"""
# import first for core setup
import odoo.init # noqa: F401

View file

@ -0,0 +1,130 @@
from __future__ import annotations
import enum
import typing
if typing.TYPE_CHECKING:
from collections.abc import Collection
from .types import ValuesType
class Command(enum.IntEnum):
    """
    :class:`~odoo.fields.One2many` and :class:`~odoo.fields.Many2many` fields
    expect a special command to manipulate the relation they implement.

    Internally, each command is a 3-elements tuple where the first element is a
    mandatory integer that identifies the command, the second element is either
    the related record id to apply the command on (commands update, delete,
    unlink and link) or 0 (commands create, clear and set), and the third
    element is either the ``values`` to write on the record (commands create
    and update), or the new ``ids`` list of related records (command set),
    or 0 (commands delete, unlink, link, and clear).

    This triplet is aliased as ``CommandValue``.

    Via Python, we encourage developers to craft new commands via the various
    functions of this namespace. We also encourage developers to use the
    command identifier constant names when comparing the 1st element of
    existing commands.

    Via RPC, it is not possible to use either the functions or the command
    constant names. It is required to instead write the literal 3-elements
    tuple where the first element is the integer identifier of the command.
    """
    CREATE = 0
    UPDATE = 1
    DELETE = 2
    UNLINK = 3
    LINK = 4
    CLEAR = 5
    SET = 6

    @classmethod
    def create(cls, values: ValuesType) -> CommandValue:
        """
        Create new records in the comodel using ``values``, link the created
        records to ``self``.

        In case of a :class:`~odoo.fields.Many2many` relation, one unique
        new record is created in the comodel such that all records in `self`
        are linked to the new record.

        In case of a :class:`~odoo.fields.One2many` relation, one new record
        is created in the comodel for every record in ``self`` such that every
        record in ``self`` is linked to exactly one of the new records.

        Return the command triple :samp:`(CREATE, 0, {values})`
        """
        return (cls.CREATE, 0, values)

    @classmethod
    def update(cls, id: int, values: ValuesType) -> CommandValue:
        """
        Write ``values`` on the related record.

        Return the command triple :samp:`(UPDATE, {id}, {values})`
        """
        return (cls.UPDATE, id, values)

    @classmethod
    def delete(cls, id: int) -> CommandValue:
        """
        Remove the related record from the database and remove its relation
        with ``self``.

        In case of a :class:`~odoo.fields.Many2many` relation, removing the
        record from the database may be prevented if it is still linked to
        other records.

        Return the command triple :samp:`(DELETE, {id}, 0)`
        """
        return (cls.DELETE, id, 0)

    @classmethod
    def unlink(cls, id: int) -> CommandValue:
        """
        Remove the relation between ``self`` and the related record.

        In case of a :class:`~odoo.fields.One2many` relation, the given record
        is deleted from the database if the inverse field is set as
        ``ondelete='cascade'``. Otherwise, the value of the inverse field is
        set to False and the record is kept.

        Return the command triple :samp:`(UNLINK, {id}, 0)`
        """
        return (cls.UNLINK, id, 0)

    @classmethod
    def link(cls, id: int) -> CommandValue:
        """
        Add a relation between ``self`` and the related record.

        Return the command triple :samp:`(LINK, {id}, 0)`
        """
        return (cls.LINK, id, 0)

    @classmethod
    def clear(cls) -> CommandValue:
        """
        Remove all records from the relation with ``self``. It behaves like
        executing the `unlink` command on every record.

        Return the command triple :samp:`(CLEAR, 0, 0)`
        """
        return (cls.CLEAR, 0, 0)

    @classmethod
    def set(cls, ids: Collection[int]) -> CommandValue:
        """
        Replace the current relations of ``self`` by the given ones. It behaves
        like executing the ``unlink`` command on every removed relation then
        executing the ``link`` command on every new relation.

        Return the command triple :samp:`(SET, 0, {ids})`
        """
        return (cls.SET, 0, ids)
if typing.TYPE_CHECKING:
CommandValue = tuple[Command, int, typing.Literal[0] | ValuesType | Collection[int]]

View file

@ -0,0 +1,368 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""The Odoo API module defines method decorators.
"""
from __future__ import annotations
import logging
import typing
import warnings
from collections.abc import Mapping
from functools import wraps
try:
    # available since python 3.13
    from warnings import deprecated
except ImportError:
    # simplified backport of warnings.deprecated for python < 3.13:
    # supports decorating callables only (no classes/overloads handling)
    class deprecated:
        def __init__(
            self,
            message: str,
            /,
            *,
            category: type[Warning] | None = DeprecationWarning,
            stacklevel: int = 1,
        ) -> None:
            self.message = message
            self.category = category
            self.stacklevel = stacklevel

        def __call__(self, obj, /):
            message = self.message
            category = self.category
            stacklevel = self.stacklevel
            if category is None:
                # no warning at call time: only mark the object as deprecated
                obj.__deprecated__ = message
                return obj
            if callable(obj):
                @wraps(obj)
                def wrapper(*args, **kwargs):
                    # +1 so the warning points at the deprecated callable's caller
                    warnings.warn(message, category=category, stacklevel=stacklevel + 1)
                    return obj(*args, **kwargs)
                obj.__deprecated__ = wrapper.__deprecated__ = message
                return wrapper
            raise TypeError(f"@deprecated decorator cannot be applied to {obj!r}")
if typing.TYPE_CHECKING:
    from collections.abc import Callable, Collection
    from .types import BaseModel, ValuesType

    # type-checking-only aliases used in decorator signatures below
    T = typing.TypeVar('T')
    C = typing.TypeVar("C", bound=Callable)
    Decorator = Callable[[C], C]

_logger = logging.getLogger('odoo.api')

# The following attributes are used, and reflected on wrapping methods:
# - method._constrains: set by @constrains, specifies constraint dependencies
# - method._depends: set by @depends, specifies compute dependencies
# - method._onchange: set by @onchange, specifies onchange fields
# - method._ondelete: set by @ondelete, used to raise errors for unlink operations
#
# On wrapping method only:
# - method._api_*: decorator function, used for re-applying decorator
#
def attrsetter(attr, value) -> Decorator:
    """ Return a decorator that stores ``value`` under attribute ``attr`` on
    the decorated method and returns the method itself unchanged. """
    def decorate(func):
        setattr(func, attr, value)
        return func
    return decorate
@typing.overload
def constrains(func: Callable[[BaseModel], Collection[str]], /) -> Decorator:
    ...
@typing.overload
def constrains(*args: str) -> Decorator:
    ...
def constrains(*args) -> Decorator:
    """Decorate a constraint checker.

    Each argument is the name of a field involved in the check::

        @api.constrains('name', 'description')
        def _check_description(self):
            for record in self:
                if record.name == record.description:
                    raise ValidationError("Fields name and description must be different")

    The method is invoked on the records where one of the named fields was
    modified, and should raise :exc:`~odoo.exceptions.ValidationError` when
    the validation fails.

    .. warning::
        Only simple field names are supported; dotted names (fields of
        relational fields, e.g. ``partner_id.customer``) are ignored.

        The constraint only runs when a declared field appears in the
        ``create`` or ``write`` call: a field absent from a view will not
        trigger it on record creation. Override ``create`` if the constraint
        must always run (e.g. to check the absence of a value).

    Alternatively, pass a single function: it is called with a model instance
    and must return the field names.
    """
    field_names = args
    if field_names and callable(field_names[0]):
        # single-function form: the function computes the field names
        field_names = field_names[0]
    return attrsetter('_constrains', field_names)
def ondelete(*, at_uninstall: bool) -> Decorator:
    """
    Mark a method to be executed during :meth:`~odoo.models.BaseModel.unlink`.

    Use this to raise business errors when deleting records that should not be
    deleted (e.g. a validated sales order), instead of overriding ``unlink``
    directly. A plain ``unlink`` override is incompatible with module
    uninstallation: during uninstall **all** records of the module must go, and
    an override raising user errors would leave leftover tables/records behind,
    an inconsistent database, and reinstall conflicts.

    By convention, decorated methods raise under some condition and are named
    ``_unlink_if_<condition>`` or ``_unlink_except_<not_condition>``.

    .. code-block:: python

        @api.ondelete(at_uninstall=False)
        def _unlink_if_user_inactive(self):
            if any(user.active for user in self):
                raise UserError("Can't delete an active user!")

        # same as above but with _unlink_except_* as method name
        @api.ondelete(at_uninstall=False)
        def _unlink_except_active_user(self):
            if any(user.active for user in self):
                raise UserError("Can't delete an active user!")

    :param bool at_uninstall: whether the method also runs when the module
        implementing it is being uninstalled. Should almost always be
        ``False`` so uninstallation does not trigger those errors.

    .. danger::
        Only set ``at_uninstall=True`` when the check must hold even during
        uninstall. Deleting validated sales orders while uninstalling ``sale``
        is fine (all ``sale`` data goes anyway → ``False``); preventing the
        removal of the default language when no other language is installed
        must hold always (it breaks basic behavior → ``True``).
    """
    return attrsetter('_ondelete', at_uninstall)
def onchange(*args: str) -> Decorator:
    """Decorate an onchange handler for the given field names.

    In form views where one of the fields appears, the method is invoked when
    that field is modified. ``self`` is a pseudo-record holding the values
    currently present in the form; field assignments on it are automatically
    sent back to the client.

    Each argument must be a field name::

        @api.onchange('partner_id')
        def _onchange_partner(self):
            self.message = "Dear %s" % (self.partner_id.name or "")

    .. code-block:: python

        return {
            'warning': {'title': "Warning", 'message': "What is this?", 'type': 'notification'},
        }

    If the type is set to notification, the warning will be displayed in a
    notification; otherwise it is displayed in a dialog by default.

    .. warning::
        Only simple field names are supported; dotted names (fields of
        relational fields, e.g. ``partner_id.tz``) are ignored.

    .. danger::
        The records are pseudo-records: calling any CRUD method
        (:meth:`create`, :meth:`read`, :meth:`write`, :meth:`unlink`) on them
        is undefined behaviour, as they potentially do not exist in the
        database yet. Assign the record's fields as shown above, or call
        :meth:`update` instead.

    .. warning::
        A ``one2many`` or ``many2many`` field cannot modify itself via
        onchange. This is a webclient limitation - see `#2693
        <https://github.com/odoo/odoo/issues/2693>`_.
    """
    return attrsetter('_onchange', args)
@typing.overload
def depends(func: Callable[[BaseModel], Collection[str]], /) -> Decorator:
    ...
@typing.overload
def depends(*args: str) -> Decorator:
    ...
def depends(*args) -> Decorator:
    """ Declare the field dependencies of a "compute" method (new-style
    function fields). Each argument is a dot-separated sequence of field
    names::

        pname = fields.Char(compute='_compute_pname')

        @api.depends('partner_id.name', 'partner_id.is_company')
        def _compute_pname(self):
            for record in self:
                if record.partner_id.is_company:
                    record.pname = (record.partner_id.name or "").upper()
                else:
                    record.pname = record.partner_id.name

    Alternatively, pass a single function: the dependencies are obtained by
    calling it with the field's model.
    """
    if args and callable(args[0]):
        # single-function form: dependencies are computed per model
        return attrsetter('_depends', args[0])
    for arg in args:
        # depending on 'id' is not supported by the recomputation machinery
        if 'id' in arg.split('.'):
            raise NotImplementedError("Compute method cannot depend on field 'id'.")
    return attrsetter('_depends', args)
def depends_context(*args: str) -> Decorator:
    """ Declare the context dependencies of a non-stored "compute" method.
    Each argument is a key of the context dictionary::

        price = fields.Float(compute='_compute_product_price')

        @api.depends_context('pricelist')
        def _compute_product_price(self):
            for product in self:
                if product.env.context.get('pricelist'):
                    pricelist = self.env['product.pricelist'].browse(product.env.context['pricelist'])
                else:
                    pricelist = self.env['product.pricelist'].get_default_pricelist()
                product.price = pricelist._get_products_price(product).get(product.id, 0.0)

    All dependencies must be hashable. The following keys have special
    support:

    * `company` (value in context or current company id),
    * `uid` (current user id and superuser flag),
    * `active_test` (value in env.context or value in field.context).
    """
    return attrsetter('_depends_context', args)
def autovacuum(method: C) -> C:
    """
    Decorate a method so that it is called by the daily vacuum cron job (model
    ``ir.autovacuum``). This is typically used for garbage-collection-like
    tasks that do not deserve a specific cron job.

    The return value can be a tuple ``(done, remaining)`` which has a similar
    meaning as in :meth:`~odoo.addons.base.models.ir_cron.IrCron._commit_progress`.
    """
    # autovacuum tasks must not be RPC-callable, hence the private-name guard
    assert method.__name__.startswith('_'), "%s: autovacuum methods must be private" % method.__name__
    method._autovacuum = True  # type: ignore
    return method
def model(method: C) -> C:
    """ Decorate a record-style method for which the contents of the ``self``
    recordset is irrelevant — only its model matters. Such a method::

        @api.model
        def method(self, args):
            ...
    """
    if method.__name__ == 'create':
        # create() must also accept a single values dict: reroute it
        return model_create_multi(method)  # type: ignore
    method._api_model = True  # type: ignore
    return method
def private(method: C) -> C:
    """ Mark a record-style method as not callable through RPC. Example::

        @api.private
        def method(self, args):
            ...

    Business methods that must not be exposed over RPC should simply be
    prefixed with ``_``; this decorator exists for existing public methods
    that become non-RPC callable, and for ORM methods.
    """
    method._api_private = True  # type: ignore
    return method
def readonly(method: C) -> C:
    """ Mark a record-style method as safe to run with a readonly cursor:
    ``self.env.cr`` may be a readonly cursor when the method is invoked
    through an RPC call. Example::

        @api.readonly
        def method(self, args):
            ...
    """
    method._readonly = True  # type: ignore
    return method
def model_create_multi(method: Callable[[T, list[ValuesType]], T]) -> Callable[[T, list[ValuesType] | ValuesType], T]:
    """ Decorate a ``create`` method taking a list of value dicts so that it
    also accepts a single dict, always forwarding a list to ``method``::

        record = model.create(vals)
        records = model.create([vals, ...])
    """
    @wraps(method)
    def create(self: T, vals_list: list[ValuesType] | ValuesType) -> T:
        # normalize the single-dict form into a one-element list
        vals = [vals_list] if isinstance(vals_list, Mapping) else vals_list
        return method(self, vals)
    create._api_model = True  # type: ignore
    return create

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,964 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""The Odoo API module defines Odoo Environments.
"""
from __future__ import annotations
import functools
import logging
import pytz
import typing
import warnings
from collections import defaultdict
from collections.abc import Mapping
from contextlib import contextmanager, suppress
from pprint import pformat
from weakref import WeakSet
from odoo.exceptions import AccessError, UserError, CacheMiss
from odoo.sql_db import BaseCursor
from odoo.tools import clean_context, frozendict, reset_cached_properties, OrderedSet, Query, SQL
from odoo.tools.translate import get_translation, get_translated_module, LazyGettext
from odoo.tools.misc import StackMap, SENTINEL
from .registry import Registry
from .utils import SUPERUSER_ID
if typing.TYPE_CHECKING:
from collections.abc import Collection, Iterable, Iterator, MutableMapping
from datetime import tzinfo
from .identifiers import IdType, NewId
from .types import BaseModel, Field
M = typing.TypeVar('M', bound=BaseModel)
_logger = logging.getLogger('odoo.api')
MAX_FIXPOINT_ITERATIONS = 10
class Environment(Mapping[str, "BaseModel"]):
""" The environment stores various contextual data used by the ORM:
- :attr:`cr`: the current database cursor (for database queries);
- :attr:`uid`: the current user id (for access rights checks);
- :attr:`context`: the current context dictionary (arbitrary metadata);
- :attr:`su`: whether in superuser mode.
It provides access to the registry by implementing a mapping from model
names to models. It also holds a cache for records, and a data
structure to manage recomputations.
"""
cr: BaseCursor
uid: int
context: frozendict
su: bool
transaction: Transaction
def reset(self) -> None:
    """ Reset the transaction, see :meth:`Transaction.reset`.

    .. deprecated:: 19.0 call ``env.transaction.reset()`` directly.
    """
    warnings.warn("Since 19.0, use directly `transaction.reset()`", DeprecationWarning)
    self.transaction.reset()
def __new__(cls, cr: BaseCursor, uid: int, context: dict, su: bool = False):
    """ Return the environment for ``(cr, uid, context, su)``. Environments
    are interned per transaction: an existing equivalent environment is
    reused instead of creating a new one. """
    assert isinstance(cr, BaseCursor)
    if uid == SUPERUSER_ID:
        # the superuser is always in superuser mode
        su = True
    # determine transaction object
    transaction = cr.transaction
    if transaction is None:
        transaction = cr.transaction = Transaction(Registry(cr.dbname))
    # if env already exists, return it
    for env in transaction.envs:
        if env.cr is cr and env.uid == uid and env.su == su and env.context == context:
            return env
    # otherwise create environment, and add it in the set
    self = object.__new__(cls)
    self.cr, self.uid, self.su = cr, uid, su
    self.context = frozendict(context)
    self.transaction = transaction
    transaction.envs.add(self)
    # the default transaction's environment is the first one with a valid uid
    if transaction.default_env is None and uid and isinstance(uid, int):
        transaction.default_env = self
    return self
def __setattr__(self, name: str, value: typing.Any) -> None:
    """ Forbid rebinding attributes once they are set: environments are
    immutable, derive a new one with ``env(...)`` instead. """
    # once initialized, attributes are read-only
    if name in vars(self):
        raise AttributeError(f"Attribute {name!r} is read-only, call `env()` instead")
    return super().__setattr__(name, value)
#
# Mapping methods
#
def __contains__(self, model_name) -> bool:
    """ Test whether the given model exists in the registry. """
    return model_name in self.registry
def __getitem__(self, model_name: str) -> BaseModel:
    """ Return an empty recordset of the given model, bound to ``self``. """
    return self.registry[model_name](self, (), ())
def __iter__(self) -> Iterator[str]:
    """ Return an iterator on model names. """
    return iter(self.registry)
def __len__(self) -> int:
    """ Return the size of the model registry. """
    return len(self.registry)
def __eq__(self, other) -> bool:
    # environments are interned per transaction: equality is identity
    return self is other
def __ne__(self, other) -> bool:
    # consistent with the identity-based __eq__
    return self is not other
def __hash__(self) -> int:
    # identity hash, consistent with the identity-based __eq__
    return object.__hash__(self)
def __call__(
    self,
    cr: BaseCursor | None = None,
    user: IdType | BaseModel | None = None,
    context: dict | None = None,
    su: bool | None = None,
) -> Environment:
    """ Return an environment based on ``self`` with modified parameters.

    :param cr: optional database cursor to change the current cursor
    :type cr: :class:`~odoo.sql_db.Cursor`
    :param user: optional user/user id to change the current user
    :type user: int or :class:`res.users record<~odoo.addons.base.models.res_users.ResUsers>`
    :param dict context: optional context dictionary to change the current context
    :param bool su: optional boolean to change the superuser mode
    :returns: environment with specified args (new or existing one)
    """
    cr = self.cr if cr is None else cr
    uid = self.uid if user is None else int(user)  # type: ignore
    if context is None:
        # when newly escalating to superuser mode, drop user-specific context keys
        context = clean_context(self.context) if su and not self.su else self.context
    # keep superuser mode only when the user is unchanged
    su = (user is None and self.su) if su is None else su
    return Environment(cr, uid, context, su)
@typing.overload
def ref(self, xml_id: str, raise_if_not_found: typing.Literal[True] = True) -> BaseModel:
    ...
@typing.overload
def ref(self, xml_id: str, raise_if_not_found: typing.Literal[False]) -> BaseModel | None:
    ...
def ref(self, xml_id: str, raise_if_not_found: bool = True) -> BaseModel | None:
    """ Return the record corresponding to the given ``xml_id``.

    :param str xml_id: record xml_id, under the format ``<module.id>``
    :param bool raise_if_not_found: whether the method should raise if record is not found
    :returns: Found record or None
    :raise ValueError: if record wasn't found and ``raise_if_not_found`` is True
    """
    res_model, res_id = self['ir.model.data']._xmlid_to_res_model_res_id(
        xml_id, raise_if_not_found=raise_if_not_found
    )
    if res_model and res_id:
        record = self[res_model].browse(res_id)
        # the xml_id may reference a record that was deleted meanwhile
        if record.exists():
            return record
    if raise_if_not_found:
        raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xml_id))
    return None
def is_superuser(self) -> bool:
    """ Return whether the environment is in superuser mode. """
    return self.su
def is_admin(self) -> bool:
    """ Return whether the current user has group "Access Rights", or is in
    superuser mode. """
    return self.su or self.user._is_admin()
def is_system(self) -> bool:
    """ Return whether the current user has group "Settings", or is in
    superuser mode. """
    return self.su or self.user._is_system()
@functools.cached_property
def registry(self) -> Registry:
    """Return the registry associated with the transaction."""
    return self.transaction.registry
@functools.cached_property
def _protected(self):
    """Return the transaction's map of fields protected from
    invalidation/recomputation (see :meth:`protecting`)."""
    return self.transaction.protected
@functools.cached_property
def cache(self):
    """Return the record cache object of the transaction."""
    return self.transaction.cache
@functools.cached_property
def user(self) -> BaseModel:
    """Return the current user (as an instance).

    :returns: current user - sudoed
    :rtype: :class:`res.users record<~odoo.addons.base.models.res_users.ResUsers>`"""
    return self(su=True)['res.users'].browse(self.uid)
@functools.cached_property
def company(self) -> BaseModel:
    """Return the current company (as an instance).

    If not specified in the context (`allowed_company_ids`),
    fallback on current user main company.

    :raise AccessError: invalid or unauthorized `allowed_company_ids` context key content.
    :return: current company (default=`self.user.company_id`), with the current environment
    :rtype: :class:`res.company record<~odoo.addons.base.models.res_company.Company>`

    .. warning::
        No sanity checks applied in sudo mode!
        When in sudo mode, a user can access any company,
        even if not in his allowed companies.

        This allows to trigger inter-company modifications,
        even if the current user doesn't have access to
        the targeted company.
    """
    company_ids = self.context.get('allowed_company_ids', [])
    if company_ids:
        if not self.su:
            # non-superusers may only select among their own companies
            user_company_ids = self.user._get_company_ids()
            if set(company_ids) - set(user_company_ids):
                raise AccessError(self._("Access to unauthorized or invalid companies."))
        # by convention, the first allowed company is the current one
        return self['res.company'].browse(company_ids[0])
    return self.user.company_id.with_env(self)
@functools.cached_property
def companies(self) -> BaseModel:
    """Return a recordset of the enabled companies by the user.

    If not specified in the context (`allowed_company_ids`),
    fallback on current user companies.

    :raise AccessError: invalid or unauthorized `allowed_company_ids` context key content.
    :return: current companies (default=`self.user.company_ids`), with the current environment
    :rtype: :class:`res.company recordset<~odoo.addons.base.models.res_company.Company>`

    .. warning::
        No sanity checks applied in sudo mode !
        When in sudo mode, a user can access any company,
        even if not in his allowed companies.

        This allows to trigger inter-company modifications,
        even if the current user doesn't have access to
        the targeted company.
    """
    company_ids = self.context.get('allowed_company_ids', [])
    user_company_ids = self.user._get_company_ids()
    if company_ids:
        if not self.su:
            if set(company_ids) - set(user_company_ids):
                raise AccessError(self._("Access to unauthorized or invalid companies."))
        return self['res.company'].browse(company_ids)
    # By setting the default companies to all user companies instead of the main one
    # we save a lot of potential trouble in all "out of context" calls, such as
    # /mail/redirect or /web/image, etc. And it is not unsafe because the user does
    # have access to these other companies. The risk of exposing foreign records
    # (wrt to the context) is low because all normal RPCs will have a proper
    # allowed_company_ids.
    # Examples:
    #   - when printing a report for several records from several companies
    #   - when accessing to a record from the notification email template
    #   - when loading an binary image on a template
    return self['res.company'].browse(user_company_ids)
@functools.cached_property
def tz(self) -> tzinfo:
    """Return the current timezone info (context ``tz`` or the user's
    timezone), defaulting to UTC when unset or invalid."""
    timezone = self.context.get('tz') or self.user.tz
    if timezone:
        try:
            return pytz.timezone(timezone)
        except Exception:  # noqa: BLE001
            # an unknown timezone name falls back on UTC instead of crashing
            _logger.debug("Invalid timezone %r", timezone, exc_info=True)
    return pytz.utc
@functools.cached_property
def lang(self) -> str | None:
    """Return the current language code, or ``None`` when unset.

    :raise UserError: when the context language is not installed."""
    lang = self.context.get('lang')
    if lang and lang != 'en_US' and not self['res.lang']._get_data(code=lang):
        # cannot translate here because we do not have a valid language
        raise UserError(f'Invalid language code: {lang}')  # pylint: disable=missing-gettext
    return lang or None
@functools.cached_property
def _lang(self) -> str:
    """Return the technical language code of the current context for
    **model_terms** translated fields (prefixed with ``_`` in translation
    edit/check modes).
    """
    context = self.context
    lang = self.lang or 'en_US'
    if context.get('edit_translations') or context.get('check_translations'):
        lang = '_' + lang
    return lang
def _(self, source: str | LazyGettext, *args, **kwargs) -> str:
    """Translate the term using current environment's language.

    Usage:
    ```
    self.env._("hello world")  # dynamically get module name
    self.env._("hello %s", "test")
    self.env._(LAZY_TRANSLATION)
    ```

    :param source: String to translate or lazy translation
    :param ...: args or kwargs for templating
    :return: The translated string
    """
    lang = self.lang or 'en_US'
    if isinstance(source, str):
        assert not (args and kwargs), "Use args or kwargs, not both"
        format_args = args or kwargs
    elif isinstance(source, LazyGettext):
        # translate a lazy text evaluation
        assert not args and not kwargs, "All args should come from the lazy text"
        return source._translate(lang)
    else:
        raise TypeError(f"Cannot translate {source!r}")
    if lang == 'en_US':
        # we ignore the module as en_US is not translated
        return get_translation('base', 'en_US', source, format_args)
    try:
        # frame depth 2: resolve the module of our caller's caller
        module = get_translated_module(2)
        return get_translation(module, lang, source, format_args)
    except Exception:  # noqa: BLE001
        # best effort: fall back on the untranslated source
        _logger.debug('translation went wrong for "%r", skipped', source, exc_info=True)
        return source
def clear(self) -> None:
    """ Clear all record caches, and discard all fields to recompute.
    This may be useful when recovering from a failed ORM operation.
    """
    # drop this environment's cached properties before clearing the transaction
    reset_cached_properties(self)
    self.transaction.clear()
def invalidate_all(self, flush: bool = True) -> None:
    """ Invalidate the cache of all records.

    :param flush: whether pending updates should be flushed before invalidation.
        It is ``True`` by default, which ensures cache consistency.
        Do not use this parameter unless you know what you are doing.
    """
    if flush:
        self.flush_all()
    self.transaction.invalidate_field_data()
def _recompute_all(self) -> None:
    """ Process all pending computations, iterating until a fixpoint is
    reached (computing a field may schedule more computations). """
    for _ in range(MAX_FIXPOINT_ITERATIONS):
        # fields to compute on real records (new records are not recomputed)
        fields_ = [field for field, ids in self.transaction.tocompute.items() if any(ids)]
        if not fields_:
            break
        for field in fields_:
            self[field.model_name]._recompute_field(field)
    else:
        # for/else: the loop exhausted its iterations without converging
        _logger.warning("Too many iterations for recomputing fields!")
def flush_all(self) -> None:
    """ Flush all pending computations and updates to the database,
    iterating until a fixpoint is reached (flushing may trigger new
    computations and dirty fields). """
    for _ in range(MAX_FIXPOINT_ITERATIONS):
        self._recompute_all()
        model_names = OrderedSet(field.model_name for field in self._field_dirty)
        if not model_names:
            break
        for model_name in model_names:
            self[model_name].flush_model()
    else:
        # for/else: the loop exhausted its iterations without converging
        _logger.warning("Too many iterations for flushing fields!")
def is_protected(self, field: Field, record: BaseModel) -> bool:
    """ Tell whether ``record`` is currently protected against invalidation
    or recomputation for ``field``. """
    protected_ids = self._protected.get(field, ())
    return record.id in protected_ids
def protected(self, field: Field) -> BaseModel:
    """ Return the records whose ``field`` must not be invalidated or recomputed. """
    ids = self._protected.get(field, ())
    return self[field.model_name].browse(ids)
@typing.overload
def protecting(self, what: Collection[Field], records: BaseModel) -> typing.ContextManager[None]:
    ...
@typing.overload
def protecting(self, what: Collection[tuple[Collection[Field], BaseModel]]) -> typing.ContextManager[None]:
    ...
@contextmanager
def protecting(self, what, records=None) -> Iterator[None]:
    """ Prevent the invalidation or recomputation of fields on records.
    The parameters are either:

    - ``what`` a collection of fields and ``records`` a recordset, or
    - ``what`` a collection of pairs ``(fields, records)``.
    """
    protected = self._protected
    try:
        # the new protection level is popped in all cases (see finally)
        protected.pushmap()
        if records is not None:  # convert first signature to second one
            what = [(what, records)]
        ids_by_field = defaultdict(list)
        for fields, what_records in what:
            for field in fields:
                ids_by_field[field].extend(what_records._ids)
        for field, rec_ids in ids_by_field.items():
            ids = protected.get(field)
            # extend the protection inherited from the outer level
            protected[field] = ids.union(rec_ids) if ids else frozenset(rec_ids)
        yield
    finally:
        protected.popmap()
def fields_to_compute(self) -> Collection[Field]:
    """ Return a live view on the fields that have pending computations. """
    return self.transaction.tocompute.keys()
def records_to_compute(self, field: Field) -> BaseModel:
    """ Return the records for which ``field`` must still be computed. """
    pending_ids = self.transaction.tocompute.get(field, ())
    model = self[field.model_name]
    return model.browse(pending_ids)
def is_to_compute(self, field: Field, record: BaseModel) -> bool:
    """ Tell whether ``field`` still has to be computed on ``record``. """
    pending_ids = self.transaction.tocompute.get(field, ())
    return record.id in pending_ids
def not_to_compute(self, field: Field, records: BaseModel) -> BaseModel:
    """ Return the subset of ``records`` for which ``field`` needs no computation. """
    pending_ids = self.transaction.tocompute.get(field, ())
    return records.browse(rid for rid in records._ids if rid not in pending_ids)
def add_to_compute(self, field: Field, records: BaseModel) -> None:
    """ Mark ``field`` to be computed on ``records``. """
    if not records:
        return
    # only stored computed fields go through the recomputation machinery
    assert field.store and field.compute, "Cannot add to recompute no-store or no-computed field"
    self.transaction.tocompute[field].update(records._ids)
def remove_to_compute(self, field: Field, records: BaseModel) -> None:
    """ Mark ``field`` as computed on ``records``.

    When nothing remains to compute for ``field``, its entry is dropped
    entirely from the pending computations.
    """
    if not records:
        return
    pending_ids = self.transaction.tocompute.get(field, None)
    if pending_ids is None:
        return
    pending_ids.difference_update(records._ids)
    if not pending_ids:
        del self.transaction.tocompute[field]
def cache_key(self, field: Field) -> typing.Any:
    """ Return the cache key of the given ``field``.

    The key is a tuple with one entry per context dependency declared for
    the field (``registry.field_depends_context``); two environments that
    produce the same key may share the cached values of ``field``.
    """
    # note: `get_context` is bound as a default argument so the closure
    # captures the context lookup of *this* environment once
    def get(key, get_context=self.context.get):
        if key == 'company':
            # the current company's id
            return self.company.id
        elif key == 'uid':
            # fields computed as superuser ignore the `su` flag in the key
            return self.uid if field.compute_sudo else (self.uid, self.su)
        elif key == 'lang':
            return get_context('lang') or None
        elif key == 'active_test':
            # the field's own context provides the fallback default
            return get_context('active_test', field.context.get('active_test', True))
        elif key.startswith('bin_size'):
            return bool(get_context(key))
        else:
            val = get_context(key)
            # lists commonly appear in context (e.g. ids); make them hashable
            if type(val) is list:
                val = tuple(val)
            try:
                hash(val)
            except TypeError:
                raise TypeError(
                    "Can only create cache keys from hashable values, "
                    f"got non-hashable value {val!r} at context key {key!r} "
                    f"(dependency of field {field})"
                ) from None  # we don't need to chain the exception created 2 lines above
            else:
                return val

    return tuple(get(key) for key in self.registry.field_depends_context[field])
@functools.cached_property
def _field_cache_memo(self) -> dict[Field, MutableMapping[IdType, typing.Any]]:
    """Per-environment memo backing `Field._get_cache(env)`. Do not use it directly."""
    return dict()
@functools.cached_property
def _field_dirty(self):
    """ Mapping from fields to the set of record ids with dirty cached values. """
    transaction = self.transaction
    return transaction.field_dirty
@functools.cached_property
def _field_depends_context(self):
    """ Shortcut to the registry's per-field context dependencies. """
    return self.registry.field_depends_context
def flush_query(self, query: SQL) -> None:
    """ Flush to the database all the fields referenced in the metadata of ``query``. """
    pending_fields = tuple(query.to_flush)
    if not pending_fields:
        return
    # group the names of the fields to flush by their model name
    names_by_model = defaultdict[str, OrderedSet[str]](OrderedSet)
    for field in pending_fields:
        names_by_model[field.model_name].add(field.name)
    for model_name, field_names in names_by_model.items():
        self[model_name].flush_model(field_names)
def execute_query(self, query: SQL) -> list[tuple]:
    """ Execute ``query`` and return the fetched rows as a list of tuples
    (an empty list when the query produces no result set).  All the fields
    in the metadata of the query are flushed beforehand.
    """
    assert isinstance(query, SQL)
    self.flush_query(query)
    cursor = self.cr
    cursor.execute(query)
    if cursor.description is None:
        return []
    return cursor.fetchall()
def execute_query_dict(self, query: SQL) -> list[dict]:
    """ Execute ``query`` and return its result as a list of dicts mapping
    column names to row values.  Fields in the metadata of the query are
    flushed automatically (see :meth:`execute_query`).
    """
    rows = self.execute_query(query)
    if not rows:
        return []
    description = self.cr.description
    assert description is not None, "No cr.description, the executed query does not return a table."
    column_names = [column.name for column in description]
    return [
        {name: row[index] for index, name in enumerate(column_names)}
        for row in rows
    ]
class Transaction:
    """ An object holding the ORM data structures shared by all the
    environments of one database transaction (one cursor).
    """
    __slots__ = (
        '_Transaction__file_open_tmp_paths', 'cache',
        'default_env', 'envs', 'field_data', 'field_data_patches', 'field_dirty',
        'protected', 'registry', 'tocompute',
    )

    def __init__(self, registry: Registry):
        self.registry = registry
        # weak OrderedSet of environments (weak so envs can be collected,
        # ordered so iteration is deterministic)
        self.envs = WeakSet[Environment]()
        self.envs.data = OrderedSet()  # type: ignore[attr-defined]
        # default environment (for flushing)
        self.default_env: Environment | None = None
        # cache data {field: cache_data_managed_by_field}; usually a mapping
        # from record id to value, but each field may store whatever
        # structure it needs
        self.field_data = defaultdict["Field", typing.Any](dict)
        # {field: set[id]}: fields and record ids that are changed in the
        # cache but not yet written to the database; the changed values are
        # in `field_data`
        self.field_dirty = defaultdict["Field", OrderedSet["IdType"]](OrderedSet)
        # {field: {record_id: ids}}: record ids to be added to the values of
        # x2many fields once those values land in the cache
        self.field_data_patches = defaultdict["Field", defaultdict["IdType", list["IdType"]]](lambda: defaultdict(list))
        # fields protected from invalidation/recomputation {field: ids}
        self.protected = StackMap["Field", OrderedSet["IdType"]]()
        # pending computations {field: ids}
        self.tocompute = defaultdict["Field", OrderedSet["IdType"]](OrderedSet)
        # backward-compatible view of the cache
        self.cache = Cache(self)
        # temporary directories (managed in odoo.tools.file_open_temporary_directory)
        self.__file_open_tmp_paths = ()  # type: ignore # noqa: PLE0237

    def flush(self) -> None:
        """ Flush pending computations and updates in the transaction. """
        if self.default_env is not None:
            self.default_env.flush_all()
        else:
            # fallback: flush with a fresh environment bound to the public
            # user; a single environment suffices since all environments of
            # the transaction share the same cursor
            for env in self.envs:
                _logger.warning("Missing default_env, flushing as public user")
                public_user = env.ref('base.public_user')
                Environment(env.cr, public_user.id, {}).flush_all()
                break

    def clear(self):
        """ Clear the caches and pending computations and updates in the transactions. """
        self.invalidate_field_data()
        self.field_data_patches.clear()
        self.field_dirty.clear()
        self.tocompute.clear()
        for env in self.envs:
            env.cr.cache.clear()
            break  # all envs of the transaction share the same cursor

    def reset(self) -> None:
        """ Reset the transaction. This clears the transaction, and reassigns
        the registry on all its environments. This operation is strongly
        recommended after reloading the registry.
        """
        self.registry = Registry(self.registry.db_name)
        for env in self.envs:
            reset_cached_properties(env)
        self.clear()

    def invalidate_field_data(self) -> None:
        """ Invalidate the cache of all the fields.

        This operation is unsafe by default, and must be used with care.
        Indeed, invalidating a dirty field on a record may lead to an error,
        because doing so drops the value to be written in database.
        """
        self.field_data.clear()
        # reset Field._get_cache() by dropping each env's cached memo
        for env in self.envs:
            with suppress(AttributeError):
                del env._field_cache_memo
# immutable empty mapping, shared as a sentinel/default value for optional
# dict parameters
EMPTY_DICT = frozendict()  # type: ignore
class Cache:
    """ Implementation of the cache of records.

    For most fields, the cache is simply a mapping from a record and a field to
    a value. In the case of context-dependent fields, the mapping also depends
    on the environment of the given record. For the sake of performance, the
    cache is first partitioned by field, then by record. This makes some
    common ORM operations pretty fast, like determining which records have a
    value for a given field, or invalidating a given field on all possible
    records.

    The cache can also mark some entries as "dirty". Dirty entries essentially
    marks values that are different from the database. They represent database
    updates that haven't been done yet. Note that dirty entries only make
    sense for stored fields. Note also that if a field is dirty on a given
    record, and the field is context-dependent, then all the values of the
    record for that field are considered dirty. For the sake of consistency,
    the values that should be in the database must be in a context where all
    the field's context keys are ``None``.
    """
    __slots__ = ('transaction',)

    def __init__(self, transaction: Transaction):
        self.transaction = transaction

    def __repr__(self) -> str:
        # for debugging: show the cache content and dirty flags as stars
        data: dict[Field, dict] = {}
        for field, field_cache in sorted(self.transaction.field_data.items(), key=lambda item: str(item[0])):
            dirty_ids = self.transaction.field_dirty.get(field, ())
            if field in self.transaction.registry.field_depends_context:
                # context-dependent fields have one extra level of keys
                data[field] = {
                    key: {
                        Starred(id_) if id_ in dirty_ids else id_: val if field.type != 'binary' else '<binary>'
                        for id_, val in key_cache.items()
                    }
                    for key, key_cache in field_cache.items()
                }
            else:
                data[field] = {
                    Starred(id_) if id_ in dirty_ids else id_: val if field.type != 'binary' else '<binary>'
                    for id_, val in field_cache.items()
                }
        return repr(data)

    def _get_field_cache(self, model: BaseModel, field: Field) -> Mapping[IdType, typing.Any]:
        """ Return the field cache of the given field, but not for modifying it. """
        return self._set_field_cache(model, field)

    def _set_field_cache(self, model: BaseModel, field: Field) -> dict[IdType, typing.Any]:
        """ Return the field cache of the given field for modifying it. """
        return field._get_cache(model.env)

    def contains(self, record: BaseModel, field: Field) -> bool:
        """ Return whether ``record`` has a value for ``field``. """
        return record.id in self._get_field_cache(record, field)

    def contains_field(self, field: Field) -> bool:
        """ Return whether ``field`` has a value for at least one record. """
        cache = self.transaction.field_data.get(field)
        if not cache:
            return False
        # 'cache' keys are tuples if 'field' is context-dependent, record ids otherwise
        if field in self.transaction.registry.field_depends_context:
            return any(value for value in cache.values())
        return True

    def get(self, record: BaseModel, field: Field, default=SENTINEL):
        """ Return the value of ``field`` for ``record``. """
        try:
            field_cache = self._get_field_cache(record, field)
            return field_cache[record._ids[0]]
        except KeyError:
            if default is SENTINEL:
                raise CacheMiss(record, field) from None
            return default

    def set(self, record: BaseModel, field: Field, value: typing.Any, dirty: bool = False) -> None:
        """ Set the value of ``field`` for ``record``.
        One can normally make a clean field dirty but not the other way around.
        Updating a dirty field without ``dirty=True`` is a programming error and
        raises an exception.

        :param dirty: whether ``field`` must be made dirty on ``record`` after
            the update
        """
        field._update_cache(record, value, dirty=dirty)

    def update(self, records: BaseModel, field: Field, values: Iterable, dirty: bool = False) -> None:
        """ Set the values of ``field`` for several ``records``.
        One can normally make a clean field dirty but not the other way around.
        Updating a dirty field without ``dirty=True`` is a programming error and
        raises an exception.

        :param dirty: whether ``field`` must be made dirty on ``record`` after
            the update
        """
        for record, value in zip(records, values):
            field._update_cache(record, value, dirty=dirty)

    def update_raw(self, records: BaseModel, field: Field, values: Iterable, dirty: bool = False) -> None:
        """ This is a variant of method :meth:`~update` without the logic for
        translated fields.
        """
        if field.translate:
            # prefetch all languages so translated values are set raw
            records = records.with_context(prefetch_langs=True)
        for record, value in zip(records, values):
            field._update_cache(record, value, dirty=dirty)

    def insert_missing(self, records: BaseModel, field: Field, values: Iterable) -> None:
        """ Set the values of ``field`` for the records in ``records`` that
        don't have a value yet. In other words, this does not overwrite
        existing values in cache.

        .. deprecated:: 19.0 use ``Field._insert_cache`` instead.
        """
        warnings.warn("Since 19.0, use Field._insert_cache", DeprecationWarning)
        field._insert_cache(records, values)

    def patch(self, records: BaseModel, field: Field, new_id: NewId):
        """ Apply a patch to an x2many field on new records. The patch consists
        in adding new_id to its value in cache. If the value is not in cache
        yet, it will be applied once the value is put in cache with method
        :meth:`patch_and_set`.

        .. deprecated:: 19.0 internal method.
        """
        warnings.warn("Since 19.0, this method is internal", DeprecationWarning)
        from .fields_relational import _RelationalMulti  # noqa: PLC0415
        assert isinstance(field, _RelationalMulti)
        value = records.env[field.comodel_name].browse((new_id,))
        field._update_inverse(records, value)

    def patch_and_set(self, record: BaseModel, field: Field, value: typing.Any) -> typing.Any:
        """ Set the value of ``field`` for ``record``, like :meth:`set`, but
        apply pending patches to ``value`` and return the value actually put
        in cache.

        .. deprecated:: 19.0 internal method.
        """
        warnings.warn("Since 19.0, this method is internal", DeprecationWarning)
        field._update_cache(record, value)
        return self.get(record, field)

    def remove(self, record: BaseModel, field: Field) -> None:
        """ Remove the value of ``field`` for ``record``. """
        # removing a dirty value would lose a pending database update
        assert record.id not in self.transaction.field_dirty.get(field, ())
        try:
            field_cache = self._set_field_cache(record, field)
            del field_cache[record._ids[0]]
        except KeyError:
            pass

    def get_values(self, records: BaseModel, field: Field) -> Iterator[typing.Any]:
        """ Return the cached values of ``field`` for ``records``.

        Records without a cached value are silently skipped.
        """
        field_cache = self._get_field_cache(records, field)
        for record_id in records._ids:
            try:
                yield field_cache[record_id]
            except KeyError:
                pass

    def get_until_miss(self, records: BaseModel, field: Field) -> list[typing.Any]:
        """ Return the cached values of ``field`` for ``records`` until a value is not found. """
        warnings.warn("Since 19.0, this is managed directly by Field")
        field_cache = self._get_field_cache(records, field)
        vals = []
        for record_id in records._ids:
            try:
                vals.append(field_cache[record_id])
            except KeyError:
                break
        return vals

    def get_records_different_from(self, records: M, field: Field, value: typing.Any) -> M:
        """ Return the subset of ``records`` that has not ``value`` for ``field``. """
        warnings.warn("Since 19.0, becomes internal function of fields", DeprecationWarning)
        return field._filter_not_equal(records, value)

    def get_fields(self, record: BaseModel) -> Iterator[Field]:
        """ Return the fields with a value for ``record``. """
        for name, field in record._fields.items():
            if name != 'id' and record.id in self._get_field_cache(record, field):
                yield field

    def get_records(self, model: BaseModel, field: Field, all_contexts: bool = False) -> BaseModel:
        """ Return the records of ``model`` that have a value for ``field``.
        By default the method checks for values in the current context of ``model``.
        But when ``all_contexts`` is true, it checks for values *in all contexts*.
        """
        ids: Iterable
        if all_contexts and field in model.pool.field_depends_context:
            # flatten the per-context sub-caches into a single set of ids
            field_cache = self.transaction.field_data.get(field, EMPTY_DICT)
            ids = OrderedSet(id_ for sub_cache in field_cache.values() for id_ in sub_cache)
        else:
            ids = self._get_field_cache(model, field)
        return model.browse(ids)

    def get_missing_ids(self, records: BaseModel, field: Field) -> Iterator[IdType]:
        """ Return the ids of ``records`` that have no value for ``field``. """
        return field._cache_missing_ids(records)

    def get_dirty_fields(self) -> Collection[Field]:
        """ Return the fields that have dirty records in cache. """
        warnings.warn("Since 19.0, don't use Cache to manipulate dirty fields")
        return self.transaction.field_dirty.keys()

    def filtered_dirty_records(self, records: BaseModel, field: Field) -> BaseModel:
        """ Filtered ``records`` where ``field`` is dirty. """
        warnings.warn("Since 19.0, don't use Cache to manipulate dirty fields")
        dirties = self.transaction.field_dirty.get(field, ())
        return records.browse(id_ for id_ in records._ids if id_ in dirties)

    def filtered_clean_records(self, records: BaseModel, field: Field) -> BaseModel:
        """ Filtered ``records`` where ``field`` is not dirty. """
        warnings.warn("Since 19.0, don't use Cache to manipulate dirty fields")
        dirties = self.transaction.field_dirty.get(field, ())
        return records.browse(id_ for id_ in records._ids if id_ not in dirties)

    def has_dirty_fields(self, records: BaseModel, fields: Collection[Field] | None = None) -> bool:
        """ Return whether any of the given records has dirty fields.

        :param fields: a collection of fields or ``None``; the value ``None`` is
            interpreted as any field on ``records``
        """
        warnings.warn("Since 19.0, don't use Cache to manipulate dirty fields")
        if fields is None:
            return any(
                not ids.isdisjoint(records._ids)
                for field, ids in self.transaction.field_dirty.items()
                if field.model_name == records._name
            )
        else:
            return any(
                field in self.transaction.field_dirty and not self.transaction.field_dirty[field].isdisjoint(records._ids)
                for field in fields
            )

    def clear_dirty_field(self, field: Field) -> Collection[IdType]:
        """ Make the given field clean on all records, and return the ids of the
        formerly dirty records for the field.
        """
        warnings.warn("Since 19.0, don't use Cache to manipulate dirty fields")
        return self.transaction.field_dirty.pop(field, ())

    def invalidate(self, spec: Collection[tuple[Field, Collection[IdType] | None]] | None = None) -> None:
        """ Invalidate the cache, partially or totally depending on ``spec``.

        If a field is context-dependent, invalidating it for a given record
        actually invalidates all the values of that field on the record. In
        other words, the field is invalidated for the record in all
        environments.

        This operation is unsafe by default, and must be used with care.
        Indeed, invalidating a dirty field on a record may lead to an error,
        because doing so drops the value to be written in database.

        spec = [(field, ids), (field, None), ...]
        """
        if spec is None:
            self.transaction.invalidate_field_data()
            return
        # NOTE(review): assumes the transaction has at least one environment;
        # next() raises StopIteration otherwise — confirm callers guarantee it
        env = next(iter(self.transaction.envs))
        for field, ids in spec:
            field._invalidate_cache(env, ids)

    def clear(self):
        """ Invalidate the cache and its dirty flags. """
        self.transaction.invalidate_field_data()
        self.transaction.field_dirty.clear()
        self.transaction.field_data_patches.clear()

    def check(self, env: Environment) -> None:
        """ Check the consistency of the cache for the given environment. """
        depends_context = env.registry.field_depends_context
        invalids = []

        def process(model: BaseModel, field: Field, field_cache):
            # ignore new records and records to flush
            dirty_ids = self.transaction.field_dirty.get(field, ())
            ids = [id_ for id_ in field_cache if id_ and id_ not in dirty_ids]
            if not ids:
                return

            # select the column for the given ids
            query = Query(env, model._table, model._table_sql)
            sql_id = SQL.identifier(model._table, 'id')
            sql_field = model._field_to_sql(model._table, field.name, query)
            if field.type == 'binary' and (
                model.env.context.get('bin_size') or model.env.context.get('bin_size_' + field.name)
            ):
                # the cache holds sizes, not contents, in bin_size mode
                sql_field = SQL('pg_size_pretty(length(%s)::bigint)', sql_field)
            query.add_where(SQL("%s IN %s", sql_id, tuple(ids)))
            env.cr.execute(query.select(sql_id, sql_field))

            # compare returned values with corresponding values in cache
            for id_, value in env.cr.fetchall():
                cached = field_cache[id_]
                if value == cached or (not value and not cached):
                    continue
                invalids.append((model.browse((id_,)), field, {'cached': cached, 'fetched': value}))

        for field, field_cache in self.transaction.field_data.items():
            # check column fields only
            if not field.store or not field.column_type or field.translate or field.company_dependent:
                continue
            model = env[field.model_name]
            if field in depends_context:
                for context_keys, inner_cache in field_cache.items():
                    context = dict[str, typing.Any](zip(depends_context[field], context_keys))
                    if 'company' in context:
                        # the cache key 'company' actually comes from context
                        # key 'allowed_company_ids' (see property env.company
                        # and method env.cache_key())
                        context['allowed_company_ids'] = [context.pop('company')]
                    process(model.with_context(context), field, inner_cache)
            else:
                process(model, field, field_cache)

        if invalids:
            _logger.warning("Invalid cache: %s", pformat(invalids))
class Starred:
    """ Debugging helper whose ``repr`` is the wrapped value's ``repr``
    followed by a ``*`` suffix (used to flag dirty cache entries).
    """
    __slots__ = ('value',)

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "%r*" % (self.value,)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,359 @@
from __future__ import annotations
import base64
import binascii
import contextlib
import functools
import typing
import warnings
from operator import attrgetter
import psycopg2
from odoo.exceptions import UserError
from odoo.tools import SQL, human_size
from odoo.tools.mimetypes import guess_mimetype
from .fields import Field
from .utils import SQL_OPERATORS
if typing.TYPE_CHECKING:
from odoo.tools import Query
from .models import BaseModel
# http://initd.org/psycopg/docs/usage.html#binary-adaptation
# psycopg2 returns fetched binary (bytea) data as `memoryview` objects;
# this alias is used for isinstance checks on values coming from the cursor.
_BINARY = memoryview
class Binary(Field):
    """Encapsulates a binary content (e.g. a file).

    :param bool attachment: whether the field should be stored as `ir_attachment`
        or in a column of the model's table (default: ``True``).
    """
    type = 'binary'
    prefetch = False  # not prefetched by default
    _depends_context = ('bin_size',)  # depends on context (content or size)
    attachment = True  # whether value is stored in attachment

    @functools.cached_property
    def column_type(self):
        # attachment-backed binaries have no column of their own
        return None if self.attachment else ('bytea', 'bytea')

    def _get_attrs(self, model_class, name):
        attrs = super()._get_attrs(model_class, name)
        # non-stored binaries cannot be kept in attachments
        if not attrs.get('store', True):
            attrs['attachment'] = False
        return attrs

    _description_attachment = property(attrgetter('attachment'))

    def convert_to_column(self, value, record, values=None, validate=True):
        # Binary values may be byte strings (python 2.6 byte array), but
        # the legacy OpenERP convention is to transfer and store binaries
        # as base64-encoded strings. The base64 string may be provided as a
        # unicode in some circumstances, hence the str() cast here.
        # This str() coercion will only work for pure ASCII unicode strings,
        # on purpose - non base64 data must be passed as a 8bit byte strings.
        if not value:
            return None
        # Detect if the binary content is an SVG for restricting its upload
        # only to system users.
        magic_bytes = {
            b'P',  # first 6 bits of '<' (0x3C) b64 encoded
            b'<',  # plaintext XML tag opening
        }
        if isinstance(value, str):
            value = value.encode()
        if validate and value[:1] in magic_bytes:
            try:
                decoded_value = base64.b64decode(value.translate(None, delete=b'\r\n'), validate=True)
            except binascii.Error:
                decoded_value = value
            # Full mimetype detection
            if (guess_mimetype(decoded_value).startswith('image/svg') and
                    not record.env.is_system()):
                raise UserError(record.env._("Only admins can upload SVG files."))
        if isinstance(value, bytes):
            return psycopg2.Binary(value)
        try:
            return psycopg2.Binary(str(value).encode('ascii'))
        except UnicodeEncodeError:
            raise UserError(record.env._("ASCII characters are required for %(value)s in %(field)s", value=value, field=self.name))

    def get_column_update(self, record: BaseModel):
        # since the field depends on context, force the value where we have the data
        bin_size_name = 'bin_size_' + self.name
        record_no_bin_size = record.with_context(**{'bin_size': False, bin_size_name: False})
        return self._get_cache(record_no_bin_size.env)[record.id]

    def convert_to_cache(self, value, record, validate=True):
        if isinstance(value, _BINARY):
            return bytes(value)
        if isinstance(value, str):
            # the cache must contain bytes or memoryview, but sometimes a string
            # is given when assigning a binary field (test `TestFileSeparator`)
            return value.encode()
        if isinstance(value, int) and \
                (record.env.context.get('bin_size') or
                 record.env.context.get('bin_size_' + self.name)):
            # If the client requests only the size of the field, we return that
            # instead of the content. Presumably a separate request will be done
            # to read the actual content, if necessary.
            value = human_size(value)
            # human_size can return False (-> None) or a string (-> encoded)
            return value.encode() if value else None
        return None if value is False else value

    def convert_to_record(self, value, record):
        if isinstance(value, _BINARY):
            return bytes(value)
        return False if value is None else value

    def compute_value(self, records):
        # in bin_size mode, compute the real value first, then derive sizes
        bin_size_name = 'bin_size_' + self.name
        if records.env.context.get('bin_size') or records.env.context.get(bin_size_name):
            # always compute without bin_size
            records_no_bin_size = records.with_context(**{'bin_size': False, bin_size_name: False})
            super().compute_value(records_no_bin_size)
            # manually update the bin_size cache
            field_cache_data = self._get_cache(records_no_bin_size.env)
            field_cache_size = self._get_cache(records.env)
            for record in records:
                try:
                    value = field_cache_data[record.id]
                    # don't decode non-attachments to be consistent with pg_size_pretty
                    if not (self.store and self.column_type):
                        with contextlib.suppress(TypeError, binascii.Error):
                            value = base64.b64decode(value)
                    try:
                        if isinstance(value, (bytes, _BINARY)):
                            value = human_size(len(value))
                    except (TypeError):
                        pass
                    cache_value = self.convert_to_cache(value, record)
                    # the dirty flag is independent from this assignment
                    field_cache_size[record.id] = cache_value
                except KeyError:
                    pass
        else:
            super().compute_value(records)

    def read(self, records):
        # fetch attachment-backed values and fill the cache for `records`
        def _encode(s: str | bool) -> bytes | bool:
            if isinstance(s, str):
                return s.encode("utf-8")
            return s

        # values are stored in attachments, retrieve them
        assert self.attachment
        domain = [
            ('res_model', '=', records._name),
            ('res_field', '=', self.name),
            ('res_id', 'in', records.ids),
        ]
        bin_size = records.env.context.get('bin_size')
        data = {
            att.res_id: _encode(human_size(att.file_size)) if bin_size else att.datas
            for att in records.env['ir.attachment'].sudo().search_fetch(domain)
        }
        # records with no attachment get None (data.get default)
        self._insert_cache(records, map(data.get, records._ids))

    def create(self, record_values):
        assert self.attachment
        if not record_values:
            return
        # create the attachments that store the values
        env = record_values[0][0].env
        env['ir.attachment'].sudo().create([
            {
                'name': self.name,
                'res_model': self.model_name,
                'res_field': self.name,
                'res_id': record.id,
                'type': 'binary',
                'datas': value,
            }
            for record, value in record_values
            if value
        ])

    def write(self, records, value):
        # the cached value must be the content, never the size
        records = records.with_context(bin_size=False)
        if not self.attachment:
            super().write(records, value)
            return

        # discard recomputation of self on records
        records.env.remove_to_compute(self, records)

        # update the cache, and discard the records that are not modified
        cache_value = self.convert_to_cache(value, records)
        records = self._filter_not_equal(records, cache_value)
        if not records:
            return
        if self.store:
            # determine records that are known to be not null
            not_null = self._filter_not_equal(records, None)

        self._update_cache(records, cache_value)

        # retrieve the attachments that store the values, and adapt them
        if self.store and any(records._ids):
            real_records = records.filtered('id')
            atts = records.env['ir.attachment'].sudo()
            if not_null:
                atts = atts.search([
                    ('res_model', '=', self.model_name),
                    ('res_field', '=', self.name),
                    ('res_id', 'in', real_records.ids),
                ])
            if value:
                # update the existing attachments
                atts.write({'datas': value})
                atts_records = records.browse(atts.mapped('res_id'))
                # create the missing attachments
                missing = (real_records - atts_records)
                if missing:
                    atts.create([{
                            'name': self.name,
                            'res_model': record._name,
                            'res_field': self.name,
                            'res_id': record.id,
                            'type': 'binary',
                            'datas': value,
                        }
                        for record in missing
                    ])
            else:
                atts.unlink()

    def condition_to_sql(self, field_expr: str, operator: str, value, model: BaseModel, alias: str, query: Query) -> SQL:
        if not self.attachment or field_expr != self.name:
            return super().condition_to_sql(field_expr, operator, value, model, alias, query)
        # attachment-backed binaries can only be tested for (non-)existence
        assert operator in ('in', 'not in') and set(value) == {False}, "Should have been done in Domain optimization"
        # NOTE(review): the '=' alternative below is unreachable given the
        # assert above restricts operator to 'in'/'not in' — confirm intent
        return SQL(
            "%s%s(SELECT res_id FROM ir_attachment WHERE res_model = %s AND res_field = %s)",
            model._field_to_sql(alias, 'id', query),
            SQL_OPERATORS['not in' if operator in ('in', '=') else 'in'],
            model._name,
            self.name,
        )
class Image(Binary):
    """Encapsulates an image, extending :class:`Binary`.

    If image size is greater than the ``max_width``/``max_height`` limit of pixels, the image will be
    resized to the limit by keeping aspect ratio.

    :param int max_width: the maximum width of the image (default: ``0``, no limit)
    :param int max_height: the maximum height of the image (default: ``0``, no limit)
    :param bool verify_resolution: whether the image resolution should be verified
        to ensure it doesn't go over the maximum image resolution (default: ``True``).
        See :class:`odoo.tools.image.ImageProcess` for maximum image resolution (default: ``50e6``).

    .. note::

        If no ``max_width``/``max_height`` is specified (or is set to 0) and ``verify_resolution`` is False,
        the field content won't be verified at all and a :class:`Binary` field should be used.
    """
    max_width = 0
    max_height = 0
    verify_resolution = True

    def setup(self, model):
        super().setup(model)
        if not model._abstract and not model._log_access:
            warnings.warn(f"Image field {self} requires the model to have _log_access = True", stacklevel=1)

    def create(self, record_values):
        new_record_values = []
        for record, value in record_values:
            # resize/verify the image before storing it
            new_value = self._image_process(value, record.env)
            new_record_values.append((record, new_value))
            # when setting related image field, keep the unprocessed image in
            # cache to let the inverse method use the original image; the image
            # will be resized once the inverse has been applied
            cache_value = self.convert_to_cache(value if self.related else new_value, record)
            self._update_cache(record, cache_value)
        super().create(new_record_values)

    def write(self, records, value):
        try:
            new_value = self._image_process(value, records.env)
        except UserError:
            if not any(records._ids):
                # Some crap is assigned to a new record. This can happen in an
                # onchange, where the client sends the "bin size" value of the
                # field instead of its full value (this saves bandwidth). In
                # this case, we simply don't assign the field: its value will be
                # taken from the records' origin.
                return
            raise

        super().write(records, new_value)
        # keep the unprocessed image in cache for related fields (see create)
        cache_value = self.convert_to_cache(value if self.related else new_value, records)
        self._update_cache(records, cache_value, dirty=True)

    def _inverse_related(self, records):
        super()._inverse_related(records)
        if not (self.max_width and self.max_height):
            return
        # the inverse has been applied with the original image; now we fix the
        # cache with the resized value
        for record in records:
            value = self._process_related(record[self.name], record.env)
            self._update_cache(record, value, dirty=True)

    def _image_process(self, value, env):
        if self.readonly and not self.max_width and not self.max_height:
            # no need to process images for computed fields, or related fields
            return value
        try:
            img = base64.b64decode(value or '') or False
        except Exception as e:
            # intentionally broad: any decoding failure means bad base64 input
            raise UserError(env._("Image is not encoded in base64.")) from e

        if img and guess_mimetype(img, '') == 'image/webp':
            if not self.max_width and not self.max_height:
                return value
            # Fetch resized version.
            Attachment = env['ir.attachment']
            checksum = Attachment._compute_checksum(img)
            origins = Attachment.search([
                ['id', '!=', False],  # No implicit condition on res_field.
                ['checksum', '=', checksum],
            ])
            if origins:
                origin_ids = [attachment.id for attachment in origins]
                resized_domain = [
                    ['id', '!=', False],  # No implicit condition on res_field.
                    ['res_model', '=', 'ir.attachment'],
                    ['res_id', 'in', origin_ids],
                    ['description', '=', 'resize: %s' % max(self.max_width, self.max_height)],
                ]
                resized = Attachment.sudo().search(resized_domain, limit=1)
                if resized:
                    # Fallback on non-resized image (value).
                    return resized.datas or value
            return value

        # delay import of image_process until this point
        from odoo.tools.image import image_process  # noqa: PLC0415
        return base64.b64encode(image_process(img,
            size=(self.max_width, self.max_height),
            verify_resolution=self.verify_resolution,
        ) or b'') or False

    def _process_related(self, value, env):
        """Override to resize the related value before saving it on self."""
        try:
            return self._image_process(super()._process_related(value, env), env)
        except UserError:
            # Avoid the following `write` to fail if the related image was saved
            # invalid, which can happen for pre-existing databases.
            return False

View file

@ -0,0 +1,133 @@
from __future__ import annotations
import copy
import json
import typing
from psycopg2.extras import Json as PsycopgJson
from odoo.tools import SQL
from .fields import Field
from .identifiers import IdType
if typing.TYPE_CHECKING:
from .models import BaseModel
from odoo.tools import Query
# integer needs to be imported before Id because of `type` attribute clash
from . import fields_numeric # noqa: F401
class Boolean(Field[bool]):
    """ Field holding a :class:`bool` value, stored in a ``bool`` column. """
    type = 'boolean'
    _column_type = ('bool', 'bool')
    falsy_value = False

    def convert_to_column(self, value, record, values=None, validate=True):
        # coerce any truthy/falsy value to a plain bool for the database
        return bool(value)

    def convert_to_cache(self, value, record, validate=True):
        # the cache only ever holds True or False for boolean fields
        return bool(value)

    def convert_to_export(self, value, record):
        return bool(value)

    def _condition_to_sql(self, field_expr: str, operator: str, value, model: BaseModel, alias: str, query: Query) -> SQL:
        if operator not in ('in', 'not in'):
            return super()._condition_to_sql(field_expr, operator, value, model, alias, query)
        # get field and check access
        sql_field = model._field_to_sql(alias, field_expr, query)
        # normalize the condition as (field_expr, 'in', matched) where
        # matched is a subset of {True, False}
        matched = {bool(item) for item in value}
        if operator == 'not in':
            matched = {True, False} - matched
        if len(matched) != 1:
            # matching both values is a tautology, matching none is absurd
            return SQL("TRUE") if matched else SQL("FALSE")
        if True in matched:
            return SQL("%s IS TRUE", sql_field)
        return SQL("%s IS NOT TRUE", sql_field)
class Json(Field):
    """ Field storing unstructured data in a ``jsonb`` PostgreSQL column.

    This field is still in beta.

    Some features have not been implemented and won't be implemented in
    stable versions, including:

    * searching
    * indexing
    * mutating the values.
    """
    type = 'json'
    _column_type = ('jsonb', 'jsonb')

    def convert_to_record(self, value, record):
        """ Return a deep copy of the value, so the cached value cannot be mutated. """
        if value is None:
            return False
        return copy.deepcopy(value)

    def convert_to_cache(self, value, record, validate=True):
        # cache format: JSON-serializable value or None; round-tripping
        # through the json module normalizes the value
        return json.loads(json.dumps(value)) if value else None

    def convert_to_column(self, value, record, values=None, validate=True):
        return PsycopgJson(value) if value else None

    def convert_to_export(self, value, record):
        return json.dumps(value) if value else ''
class Id(Field[IdType | typing.Literal[False]]):
    """ Special case for field 'id'. """
    # Note: This field type is not necessarily an integer!
    type = 'integer'  # note this conflicts with Integer
    column_type = ('int4', 'int4')
    string = 'ID'
    store = True
    readonly = True
    prefetch = False

    def update_db(self, model, columns):
        # nothing to do: this column is created together with the table
        pass

    def __get__(self, record, owner=None):
        if record is None:
            # the field is accessed through the class owner
            return self
        # keep this path as fast as possible: record.id is ubiquitous
        ids = record._ids
        if not ids:
            return False
        if len(ids) == 1:
            return ids[0]
        raise ValueError("Expected singleton: %s" % record)

    def __set__(self, record, value):
        raise TypeError("field 'id' cannot be assigned")

    def convert_to_column(self, value, record, values=None, validate=True):
        return value

    def to_sql(self, model: BaseModel, alias: str) -> SQL:
        # do not flush, just return the identifier; 'id' is never flushed
        assert self.store, 'id field must be stored'
        return SQL.identifier(alias, self.name)

    def expression_getter(self, field_expr):
        if field_expr != 'id.origin':
            return super().expression_getter(field_expr)

        def getter(record):
            id_ = record._ids[0]
            # a real id is returned as is; a falsy id may expose an 'origin'
            return id_ or getattr(id_, 'origin', None) or False

        return getter

View file

@ -0,0 +1,290 @@
from __future__ import annotations
import typing
from operator import attrgetter
from xmlrpc.client import MAXINT # TODO change this
from odoo.exceptions import AccessError
from odoo.tools import float_compare, float_is_zero, float_repr, float_round
from odoo.tools.misc import SENTINEL, Sentinel
from .fields import Field
if typing.TYPE_CHECKING:
from .types import BaseModel, Environment
class Integer(Field[int]):
    """ Encapsulates an :class:`int`. """
    type = 'integer'
    _column_type = ('int4', 'int4')
    falsy_value = 0
    aggregator = 'sum'

    def _get_attrs(self, model_class, name):
        attrs = super()._get_attrs(model_class, name)
        # sequence fields are not meaningfully summable: default to no aggregator
        if name == 'sequence' and 'aggregator' not in attrs:
            attrs['aggregator'] = None
        return attrs

    def convert_to_column(self, value, record, values=None, validate=True):
        return int(value or 0)

    def convert_to_cache(self, value, record, validate=True):
        if isinstance(value, dict):
            # special case, when an integer field is used as inverse for a one2many
            return value.get('id', None)
        return int(value or 0)

    def convert_to_record(self, value, record):
        return value or 0

    def convert_to_read(self, value, record, use_display_name=True):
        # Integer values greater than 2^31-1 are not supported in pure XMLRPC,
        # so we have to pass them as floats :-(
        return float(value) if value and value > MAXINT else value

    def _update_inverse(self, records: BaseModel, value: BaseModel):
        self._update_cache(records, value.id or 0)

    def convert_to_export(self, value, record):
        # 0 is a meaningful value for export; only missing values become ''
        return value if (value or value == 0) else ''
class Float(Field[float]):
    """ Encapsulates a :class:`float`.

    The precision digits are given by the (optional) ``digits`` attribute.

    :param digits: a pair (total, decimal) or a string referencing a
        :class:`~odoo.addons.base.models.decimal_precision.DecimalPrecision` record name.
    :type digits: tuple(int,int) or str

    When a float is a quantity associated with an unit of measure, it is important
    to use the right tool to compare or round values with the correct precision.

    The Float class provides some static methods for this purpose:

    :func:`~odoo.fields.Float.round()` to round a float with the given precision.
    :func:`~odoo.fields.Float.is_zero()` to check if a float equals zero at the given precision.
    :func:`~odoo.fields.Float.compare()` to compare two floats at the given precision.

    .. admonition:: Example

        To round a quantity with the precision of the unit of measure::

            fields.Float.round(self.product_uom_qty, precision_rounding=self.product_uom_id.rounding)

        To check if the quantity is zero with the precision of the unit of measure::

            fields.Float.is_zero(self.product_uom_qty, precision_rounding=self.product_uom_id.rounding)

        To compare two quantities::

            field.Float.compare(self.product_uom_qty, self.qty_done, precision_rounding=self.product_uom_id.rounding)

        The compare helper uses the __cmp__ semantics for historic purposes, therefore
        the proper, idiomatic way to use this helper is like so:

            if result == 0, the first and second floats are equal
            if result < 0, the first float is lower than the second
            if result > 0, the first float is greater than the second
    """
    type = 'float'
    _digits: str | tuple[int, int] | None = None  # digits argument passed to class initializer
    falsy_value = 0.0
    aggregator = 'sum'

    def __init__(self, string: str | Sentinel = SENTINEL, digits: str | tuple[int, int] | Sentinel | None = SENTINEL, **kwargs):
        super().__init__(string=string, _digits=digits, **kwargs)

    @property
    def _column_type(self):
        # "Falsy" digits (0, False) explicitly request a NUMERIC column with
        # no fixed precision: values are saved with all significant digits.
        # With no digits at all, FLOAT8 remains the default, because it is
        # faster for most operations (sums, etc.)
        if self._digits is None:
            return ('float8', 'double precision')
        return ('numeric', 'numeric')

    def get_digits(self, env: Environment) -> tuple[int, int] | None:
        """ Return the effective (total, decimal) digits pair, resolving a
        named decimal precision when ``digits`` was given as a string. """
        digits = self._digits
        if isinstance(digits, str):
            return 16, env['decimal.precision'].precision_get(digits)
        return digits

    _related__digits = property(attrgetter('_digits'))

    def _description_digits(self, env: Environment) -> tuple[int, int] | None:
        return self.get_digits(env)

    def convert_to_column(self, value, record, values=None, validate=True):
        rounded = result = float(value or 0.0)
        digits = self.get_digits(record.env)
        if digits:
            _total, scale = digits
            rounded = float_round(result, precision_digits=scale)
            result = float_repr(rounded, precision_digits=scale)
        # company-dependent values are kept as floats
        return rounded if self.company_dependent else result

    def convert_to_cache(self, value, record, validate=True):
        # apply rounding here, otherwise value in cache may be wrong!
        result = float(value or 0.0)
        digits = self.get_digits(record.env)
        if not digits:
            return result
        return float_round(result, precision_digits=digits[1])

    def convert_to_record(self, value, record):
        return value or 0.0

    def convert_to_export(self, value, record):
        # 0.0 is a meaningful value for export; only missing values become ''
        if value or value == 0.0:
            return value
        return ''

    round = staticmethod(float_round)
    is_zero = staticmethod(float_is_zero)
    compare = staticmethod(float_compare)
class Monetary(Field[float]):
    """ Encapsulates a :class:`float` expressed in a given
    :class:`res_currency<odoo.addons.base.models.res_currency.Currency>`.

    The decimal precision and currency symbol are taken from the ``currency_field`` attribute.

    :param str currency_field: name of the :class:`Many2one` field
        holding the :class:`res_currency <odoo.addons.base.models.res_currency.Currency>`
        this monetary field is expressed in (default: `\'currency_id\'`)
    """
    type = 'monetary'
    # written late so the currency field is assigned first and available for rounding
    write_sequence = 10
    _column_type = ('numeric', 'numeric')
    falsy_value = 0.0
    # explicit currency field name; None triggers auto-detection in get_currency_field()
    currency_field: str | None = None
    aggregator = 'sum'

    def __init__(self, string: str | Sentinel = SENTINEL, currency_field: str | Sentinel = SENTINEL, **kwargs):
        super().__init__(string=string, currency_field=currency_field, **kwargs)

    def _description_currency_field(self, env: Environment) -> str | None:
        """ Currency field name exposed in the field's description. """
        return self.get_currency_field(env[self.model_name])

    def _description_aggregator(self, env: Environment):
        """ Aggregator exposed in the field's description, or None when the
        currency field itself cannot be aggregated alongside this field.
        """
        model = env[self.model_name]
        query = model._as_query(ordered=False)
        currency_field_name = self.get_currency_field(model)
        currency_field = model._fields[currency_field_name]
        # The currency field needs to be aggregable too
        if not currency_field.column_type or not currency_field.store:
            # non-column/non-stored currency: probe whether it can still be
            # aggregated through read_group before exposing an aggregator
            try:
                model._read_group_select(f"{currency_field_name}:array_agg_distinct", query)
            except (ValueError, AccessError):
                return None
        return super()._description_aggregator(env)

    def get_currency_field(self, model: BaseModel) -> str | None:
        """ Return the name of the currency field. """
        return self.currency_field or (
            'currency_id' if 'currency_id' in model._fields else
            'x_currency_id' if 'x_currency_id' in model._fields else
            None
        )

    def setup_nonrelated(self, model):
        super().setup_nonrelated(model)
        assert self.get_currency_field(model) in model._fields, \
            "Field %s with unknown currency_field %r" % (self, self.get_currency_field(model))

    def setup_related(self, model):
        super().setup_related(model)
        if self.inherited:
            # inherited fields reuse the currency field of the related model
            self.currency_field = self.related_field.get_currency_field(model.env[self.related_field.model_name])
        assert self.get_currency_field(model) in model._fields, \
            "Field %s with unknown currency_field %r" % (self, self.get_currency_field(model))

    def convert_to_column_insert(self, value, record, values=None, validate=True):
        """ Round ``value`` with the currency found in ``values`` or on the
        record, and return the column representation (a string when a
        currency is available, a float otherwise).
        """
        # retrieve currency from values or record
        currency_field_name = self.get_currency_field(record)
        currency_field = record._fields[currency_field_name]
        if values and currency_field_name in values:
            dummy = record.new({currency_field_name: values[currency_field_name]})
            currency = dummy[currency_field_name]
        elif values and currency_field.related and currency_field.related.split('.')[0] in values:
            # the currency is reachable through the related field's first step
            related_field_name = currency_field.related.split('.')[0]
            dummy = record.new({related_field_name: values[related_field_name]})
            currency = dummy[currency_field_name]
        else:
            # Note: this is wrong if 'record' is several records with different
            # currencies, which is functional nonsense and should not happen
            # BEWARE: do not prefetch other fields, because 'value' may be in
            # cache, and would be overridden by the value read from database!
            currency = record[:1].sudo().with_context(prefetch_fields=False)[currency_field_name]
            currency = currency.with_env(record.env)

        value = float(value or 0.0)
        if currency:
            return float_repr(currency.round(value), currency.decimal_places)
        return value

    def convert_to_cache(self, value, record, validate=True):
        # cache format: float (rounded with the record's currency when possible)
        value = float(value or 0.0)
        if value and validate:
            # FIXME @rco-odoo: currency may not be already initialized if it is
            # a function or related field!
            # BEWARE: do not prefetch other fields, because 'value' may be in
            # cache, and would be overridden by the value read from database!
            currency_field = self.get_currency_field(record)
            currency = record.sudo().with_context(prefetch_fields=False)[currency_field]
            if len(currency) > 1:
                raise ValueError("Got multiple currencies while assigning values of monetary field %s" % str(self))
            elif currency:
                value = currency.with_env(record.env).round(value)
        return value

    def convert_to_record(self, value, record):
        return value or 0.0

    def convert_to_read(self, value, record, use_display_name=True):
        return value

    def convert_to_write(self, value, record):
        return value

    def convert_to_export(self, value, record):
        # 0.0 is a meaningful value for export; only missing values become ''
        if value or value == 0.0:
            return value
        return ''

    def _filter_not_equal(self, records: BaseModel, cache_value: typing.Any) -> BaseModel:
        """ Return the records whose cached value differs from ``cache_value``,
        tolerating differences that vanish after rounding with the record's
        currency.
        """
        records = super()._filter_not_equal(records, cache_value)
        if not records:
            return records
        # check that the values were rounded properly when put in cache
        # see fix odoo/odoo#177200 (commit 7164d5295904b08ec3a0dc1fb54b217671ff531c)
        env = records.env
        field_cache = self._get_cache(env)
        currency_field = records._fields[self.get_currency_field(records)]
        return records.browse(
            record_id
            for record_id, record_sudo in zip(
                records._ids, records.sudo().with_context(prefetch_fields=False)
            )
            if not (
                (value := field_cache.get(record_id))
                and (currency := currency_field.__get__(record_sudo))
                and currency.with_env(env).round(value) == cache_value
            )
        )

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,118 @@
from collections import defaultdict
from operator import attrgetter
from odoo.tools import OrderedSet, unique
from odoo.tools.sql import pg_varchar
from .fields import Field
from .fields_numeric import Integer
from .fields_selection import Selection
from .models import BaseModel
class Reference(Selection):
    """ Pseudo-relational field (no FK in database).

    The field value is stored as a :class:`string <str>` following the pattern
    ``"res_model,res_id"`` in database.
    """
    type = 'reference'
    _column_type = ('varchar', pg_varchar())

    def convert_to_column(self, value, record, values=None, validate=True):
        # bypass Selection's value validation: the column stores the raw string
        return Field.convert_to_column(self, value, record, values, validate)

    def convert_to_cache(self, value, record, validate=True):
        # cache format: str ("model,id") or None
        if isinstance(value, BaseModel):
            if not validate or (value._name in self.get_values(record.env) and len(value) <= 1):
                if not value:
                    return None
                return "%s,%s" % (value._name, value.id)
        elif isinstance(value, str):
            res_model, res_id = value.split(',')
            if not validate or res_model in self.get_values(record.env):
                # only keep references to records that still exist
                if record.env[res_model].browse(int(res_id)).exists():
                    return value
                return None
        elif not value:
            return None
        # anything else (or an invalid model) is rejected
        raise ValueError("Wrong value for %s: %r" % (self, value))

    def convert_to_record(self, value, record):
        if not value:
            return None
        res_model, res_id = value.split(',')
        return record.env[res_model].browse(int(res_id))

    def convert_to_read(self, value, record, use_display_name=True):
        if not value:
            return False
        return "%s,%s" % (value._name, value.id)

    def convert_to_export(self, value, record):
        return value.display_name if value else ''

    def convert_to_display_name(self, value, record):
        return value.display_name if value else False
class Many2oneReference(Integer):
    """ Pseudo-relational field (no FK in database).

    The field value is stored as an :class:`integer <int>` id in database.

    Contrary to :class:`Reference` fields, the model has to be specified
    in a :class:`Char` field, whose name has to be specified in the
    `model_field` attribute for the current :class:`Many2oneReference` field.

    :param str model_field: name of the :class:`Char` where the model name is stored.
    """
    type = 'many2one_reference'
    model_field = None  # name of the Char field holding the comodel name
    aggregator = None

    _related_model_field = property(attrgetter('model_field'))
    _description_model_field = property(attrgetter('model_field'))

    def convert_to_cache(self, value, record, validate=True):
        # cache format: id or None
        if isinstance(value, BaseModel):
            value = value._ids[0] if value._ids else None
        return super().convert_to_cache(value, record, validate)

    def _update_inverses(self, records: BaseModel, value):
        """ Add `records` to the cached values of the inverse fields of `self`. """
        if not value:
            return
        model_ids = self._record_ids_per_res_model(records)
        for invf in records.pool.field_inverses[self]:
            # NOTE(review): 'records' is rebound here, so with several inverse
            # fields later iterations only see the records kept by earlier
            # ones -- confirm this narrowing is intended
            records = records.browse(model_ids[invf.model_name])
            if not records:
                continue
            corecord = records.env[invf.model_name].browse(value)
            records = records.filtered_domain(invf.get_comodel_domain(corecord))
            if not records:
                continue
            ids0 = invf._get_cache(corecord.env).get(corecord.id)
            # if the value for the corecord is not in cache, but this is a new
            # record, assign it anyway, as you won't be able to fetch it from
            # database (see `test_sale_order`)
            if ids0 is not None or not corecord.id:
                ids1 = tuple(unique((ids0 or ()) + records._ids))
                invf._update_cache(corecord, ids1)

    def _record_ids_per_res_model(self, records: BaseModel) -> dict[str, OrderedSet]:
        """ Group the ids of ``records`` by the comodel name found in their
        ``model_field`` value; records without a model name are skipped.
        """
        model_ids = defaultdict(OrderedSet)
        for record in records:
            model = record[self.model_field]
            if not model and record._fields[self.model_field].compute:
                # fallback when the model field is computed :-/
                record._fields[self.model_field].compute_value(record)
                model = record[self.model_field]
            if not model:
                continue
            model_ids[model].add(record.id)
        return model_ids

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,244 @@
from __future__ import annotations
import typing
from collections import defaultdict
from odoo.tools.misc import ReadonlyDict, SENTINEL, Sentinel, merge_sequences
from odoo.tools.sql import pg_varchar
from .fields import Field, _logger, determine, resolve_mro
if typing.TYPE_CHECKING:
from collections.abc import Callable
from .types import BaseModel
SelectValue = tuple[str, str] # (value, string)
OnDeletePolicy = str | Callable[[BaseModel], None]
class Selection(Field[str | typing.Literal[False]]):
    """ Encapsulates an exclusive choice between different values.

    :param selection: specifies the possible values for this field.
        It is given as either a list of pairs ``(value, label)``, or a model
        method, or a method name.
    :type selection: list(tuple(str,str)) or callable or str

    :param selection_add: provides an extension of the selection in the case
        of an overridden field. It is a list of pairs ``(value, label)`` or
        singletons ``(value,)``, where singleton values must appear in the
        overridden selection. The new values are inserted in an order that is
        consistent with the overridden selection and this list::

            selection = [('a', 'A'), ('b', 'B')]
            selection_add = [('c', 'C'), ('b',)]
            > result = [('a', 'A'), ('c', 'C'), ('b', 'B')]
    :type selection_add: list(tuple(str,str))

    :param ondelete: provides a fallback mechanism for any overridden
        field with a selection_add. It is a dict that maps every option
        from the selection_add to a fallback action.

        This fallback action will be applied to all records whose
        selection_add option maps to it.

        The actions can be any of the following:
            - 'set null' -- the default, all records with this option
              will have their selection value set to False.
            - 'cascade' -- all records with this option will be
              deleted along with the option itself.
            - 'set default' -- all records with this option will be
              set to the default of the field definition
            - 'set VALUE' -- all records with this option will be
              set to the given value
            - <callable> -- a callable whose first and only argument will be
              the set of records containing the specified Selection option,
              for custom processing

    The attribute ``selection`` is mandatory except in the case of
    ``related`` or extended fields.
    """
    type = 'selection'
    _column_type = ('varchar', pg_varchar())

    selection: list[SelectValue] | str | Callable[[BaseModel], list[SelectValue]] | None = None  # [(value, string), ...], function or method name
    validate: bool = True  # whether validating upon write
    ondelete: dict[str, OnDeletePolicy] | None = None  # {value: policy} (what to do when value is deleted)

    def __init__(self, selection=SENTINEL, string: str | Sentinel = SENTINEL, **kwargs):
        super().__init__(selection=selection, string=string, **kwargs)
        # cached {value: label} mapping when the selection is a static list
        self._selection = dict(selection) if isinstance(selection, list) else None

    def setup_nonrelated(self, model):
        super().setup_nonrelated(model)
        assert self.selection is not None, "Field %s without selection" % self

    def setup_related(self, model):
        super().setup_related(model)
        # selection must be computed on related field
        field = self.related_field
        self.selection = lambda model: field._description_selection(model.env)
        self._selection = None

    def _get_attrs(self, model_class, name):
        attrs = super()._get_attrs(model_class, name)
        # arguments 'selection' and 'selection_add' are processed below
        attrs.pop('selection_add', None)
        # Selection fields have an optional default implementation of a group_expand function
        if attrs.get('group_expand') is True:
            attrs['group_expand'] = self._default_group_expand
        return attrs

    def _setup_attrs__(self, model_class, name):
        """ Determine the selection (applying 'selection_add' extensions) and
        the resulting ondelete policies from all field definitions in the MRO.
        """
        super()._setup_attrs__(model_class, name)

        if not self._base_fields__:
            return

        # determine selection (applying 'selection_add' extensions) as a dict
        values = None

        for field in self._base_fields__:
            # We cannot use field.selection or field.selection_add here
            # because those attributes are overridden by ``_setup_attrs__``.
            if 'selection' in field._args__:
                if self.related:
                    _logger.warning("%s: selection attribute will be ignored as the field is related", self)
                selection = field._args__['selection']
                if isinstance(selection, (list, tuple)):
                    if values is not None and list(values) != [kv[0] for kv in selection]:
                        _logger.warning("%s: selection=%r overrides existing selection; use selection_add instead", self, selection)
                    values = dict(selection)
                    self.ondelete = {}
                elif callable(selection) or isinstance(selection, str):
                    # dynamic selection: no static values, no ondelete policies
                    self.ondelete = None
                    self.selection = selection
                    values = None
                else:
                    raise ValueError(f"{self!r}: selection={selection!r} should be a list, a callable or a method name")

            if 'selection_add' in field._args__:
                if self.related:
                    _logger.warning("%s: selection_add attribute will be ignored as the field is related", self)
                selection_add = field._args__['selection_add']
                assert isinstance(selection_add, list), \
                    "%s: selection_add=%r must be a list" % (self, selection_add)
                assert values is not None, \
                    "%s: selection_add=%r on non-list selection %r" % (self, selection_add, self.selection)

                values_add = {kv[0]: (kv[1] if len(kv) > 1 else None) for kv in selection_add}
                ondelete = field._args__.get('ondelete') or {}
                new_values = [key for key in values_add if key not in values]
                for key in new_values:
                    ondelete.setdefault(key, 'set null')
                if self.required and new_values and 'set null' in ondelete.values():
                    raise ValueError(
                        "%r: required selection fields must define an ondelete policy that "
                        "implements the proper cleanup of the corresponding records upon "
                        "module uninstallation. Please use one or more of the following "
                        "policies: 'set default' (if the field has a default defined), 'cascade', "
                        "or a single-argument callable where the argument is the recordset "
                        "containing the specified option." % self
                    )

                # check ondelete values
                for key, val in ondelete.items():
                    if callable(val) or val in ('set null', 'cascade'):
                        continue
                    if val == 'set default':
                        assert self.default is not None, (
                            "%r: ondelete policy of type 'set default' is invalid for this field "
                            "as it does not define a default! Either define one in the base "
                            "field, or change the chosen ondelete policy" % self
                        )
                    elif val.startswith('set '):
                        assert val[4:] in values, (
                            "%s: ondelete policy of type 'set %%' must be either 'set null', "
                            "'set default', or 'set value' where value is a valid selection value."
                        ) % self
                    else:
                        raise ValueError(
                            "%r: ondelete policy %r for selection value %r is not a valid ondelete"
                            " policy, please choose one of 'set null', 'set default', "
                            "'set [value]', 'cascade' or a callable" % (self, val, key)
                        )

                # merge the extension, preserving a consistent ordering
                values = {
                    key: values_add.get(key) or values[key]
                    for key in merge_sequences(values, values_add)
                }
                self.ondelete.update(ondelete)

        if values is not None:
            self.selection = list(values.items())
            assert all(isinstance(key, str) for key in values), \
                "Field %s with non-str value in selection" % self

        self._selection = values

    def _selection_modules(self, model):
        """ Return a mapping from selection values to modules defining each value. """
        if not isinstance(self.selection, list):
            return {}
        value_modules = defaultdict(set)
        for field in reversed(resolve_mro(model, self.name, type(self).__instancecheck__)):
            module = field._module
            if not module:
                continue
            if 'selection' in field._args__:
                # a full selection redefinition resets previous contributions
                value_modules.clear()
                if isinstance(field._args__['selection'], list):
                    for value, _label in field._args__['selection']:
                        value_modules[value].add(module)
            if 'selection_add' in field._args__:
                for value_label in field._args__['selection_add']:
                    if len(value_label) > 1:
                        value_modules[value_label[0]].add(module)
        return value_modules

    def _description_selection(self, env):
        """ return the selection list (pairs (value, label)); labels are
        translated according to context language
        """
        selection = self.selection
        if isinstance(selection, str) or callable(selection):
            selection = determine(selection, env[self.model_name])
            # force all values to be strings (check _get_year_selection)
            return [(str(key), str(label)) for key, label in selection]

        # translate selection labels
        if env.lang:
            return env['ir.model.fields'].get_field_selection(self.model_name, self.name)
        else:
            return selection

    def _default_group_expand(self, records, groups, domain):
        # return a group per selection option, in definition order
        return self.get_values(records.env)

    def get_values(self, env):
        """Return a list of the possible values."""
        selection = self.selection
        if isinstance(selection, str) or callable(selection):
            selection = determine(selection, env[self.model_name].with_context(lang=None))
        return [value for value, _ in selection]

    def convert_to_column(self, value, record, values=None, validate=True):
        if validate and self.validate:
            value = self.convert_to_cache(value, record)
        return super().convert_to_column(value, record, values, validate)

    def convert_to_cache(self, value, record, validate=True):
        # cache format: the raw selection value (str), or None
        if not validate or self._selection is None:
            return value or None
        if value in self._selection:
            return value
        if not value:
            return None
        raise ValueError("Wrong value for %s: %r" % (self, value))

    def convert_to_export(self, value, record):
        # export the human-readable label matching the value
        for item in self._description_selection(record.env):
            if item[0] == value:
                return item[1]
        return value or ''

View file

@ -0,0 +1,294 @@
from __future__ import annotations
import typing
from datetime import date, datetime, time
import pytz
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
from odoo.tools import SQL, date_utils
from .fields import Field, _logger
from .utils import parse_field_expr, READ_GROUP_NUMBER_GRANULARITY
if typing.TYPE_CHECKING:
from collections.abc import Callable
from odoo.tools import Query
from .models import BaseModel
T = typing.TypeVar("T")
DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
class BaseDate(Field[T | typing.Literal[False]], typing.Generic[T]):
""" Common field properties for Date and Datetime. """
start_of = staticmethod(date_utils.start_of)
end_of = staticmethod(date_utils.end_of)
add = staticmethod(date_utils.add)
subtract = staticmethod(date_utils.subtract)
def expression_getter(self, field_expr):
_fname, property_name = parse_field_expr(field_expr)
if not property_name:
return super().expression_getter(field_expr)
get_value = self.__get__
get_property = self._expression_property_getter(property_name)
return lambda record: (value := get_value(record)) and get_property(value)
def _expression_property_getter(self, property_name: str) -> Callable[[T], typing.Any]:
""" Return a function that maps a field value (date or datetime) to the
given ``property_name``.
"""
match property_name:
case 'tz':
return lambda value: value
case 'year_number':
return lambda value: value.year
case 'quarter_number':
return lambda value: value.month // 4 + 1
case 'month_number':
return lambda value: value.month
case 'iso_week_number':
return lambda value: value.isocalendar().week
case 'day_of_year':
return lambda value: value.timetuple().tm_yday
case 'day_of_month':
return lambda value: value.day
case 'day_of_week':
return lambda value: value.timetuple().tm_wday
case 'hour_number' if self.type == 'datetime':
return lambda value: value.hour
case 'minute_number' if self.type == 'datetime':
return lambda value: value.minute
case 'second_number' if self.type == 'datetime':
return lambda value: value.second
case 'hour_number' | 'minute_number' | 'second_number':
# for dates, it is always 0
return lambda value: 0
assert property_name not in READ_GROUP_NUMBER_GRANULARITY, f"Property not implemented {property_name}"
raise ValueError(
f"Error when processing the granularity {property_name} is not supported. "
f"Only {', '.join(READ_GROUP_NUMBER_GRANULARITY.keys())} are supported"
)
def property_to_sql(self, field_sql: SQL, property_name: str, model: BaseModel, alias: str, query: Query) -> SQL:
sql_expr = field_sql
if self.type == 'datetime' and (timezone := model.env.context.get('tz')):
# only use the timezone from the context
if timezone in pytz.all_timezones_set:
sql_expr = SQL("timezone(%s, timezone('UTC', %s))", timezone, sql_expr)
else:
_logger.warning("Grouping in unknown / legacy timezone %r", timezone)
if property_name == 'tz':
# set only the timezone
return sql_expr
if property_name not in READ_GROUP_NUMBER_GRANULARITY:
raise ValueError(f'Error when processing the granularity {property_name} is not supported. Only {", ".join(READ_GROUP_NUMBER_GRANULARITY.keys())} are supported')
granularity = READ_GROUP_NUMBER_GRANULARITY[property_name]
sql_expr = SQL('date_part(%s, %s)', granularity, sql_expr)
return sql_expr
def convert_to_column(self, value, record, values=None, validate=True):
# we can write date/datetime directly using psycopg
# except for company_dependent fields where we expect a string value
value = self.convert_to_cache(value, record, validate=validate)
if value and self.company_dependent:
value = self.to_string(value)
return value
class Date(BaseDate[date]):
    """ Encapsulates a python :class:`date <datetime.date>` object. """
    type = 'date'
    _column_type = ('date', 'date')

    @staticmethod
    def today(*args) -> date:
        """Return the current day in the format expected by the ORM.

        .. note:: This function may be used to compute default values.
        """
        return date.today()

    @staticmethod
    def context_today(record: BaseModel, timestamp: date | datetime | None = None) -> date:
        """Return the current date as seen in the client's timezone in a format
        fit for date fields.

        .. note:: This method may be used to compute default values.

        :param record: recordset from which the timezone will be obtained.
        :param timestamp: optional datetime value to use instead of
            the current date and time (must be a datetime, regular dates
            can't be converted between timezones).
        """
        moment = timestamp or datetime.now()
        aware = pytz.utc.localize(moment, is_dst=False)  # UTC = no DST
        return aware.astimezone(record.env.tz).date()

    @staticmethod
    def to_date(value) -> date | None:
        """Attempt to convert ``value`` to a :class:`date` object.

        .. warning::

            If a datetime object is given as value,
            it will be converted to a date object and all
            datetime-specific information will be lost (HMS, TZ, ...).

        :param value: value to convert.
        :type value: str or date or datetime
        :return: an object representing ``value``.
        """
        if not value:
            return None
        if isinstance(value, datetime):
            return value.date()
        if isinstance(value, date):
            return value
        return datetime.strptime(value[:DATE_LENGTH], DATE_FORMAT).date()

    # kept for backwards compatibility, but consider `from_string` as deprecated, will probably
    # be removed after V12
    from_string = to_date

    @staticmethod
    def to_string(value: date | typing.Literal[False]) -> str | typing.Literal[False]:
        """
        Convert a :class:`date` or :class:`datetime` object to a string.

        :param value: value to convert.
        :return: a string representing ``value`` in the server's date format, if ``value`` is of
            type :class:`datetime`, the hours, minute, seconds, tzinfo will be truncated.
        """
        if not value:
            return False
        return value.strftime(DATE_FORMAT)

    def convert_to_cache(self, value, record, validate=True):
        # cache format: date or None
        if not value:
            return None
        if isinstance(value, datetime):
            # TODO: better fix data files (crm demo data)
            value = value.date()
        return self.to_date(value)

    def convert_to_export(self, value, record):
        return self.to_date(value) or ''

    def convert_to_display_name(self, value, record):
        return Date.to_string(value)
class Datetime(BaseDate[datetime]):
    """ Encapsulates a python :class:`datetime <datetime.datetime>` object.

    Values are stored naive, expressed in UTC; conversion to the client
    timezone only happens for display/export (see :meth:`context_timestamp`).
    """
    type = 'datetime'
    _column_type = ('timestamp', 'timestamp')

    @staticmethod
    def now(*args) -> datetime:
        """Return the current day and time in the format expected by the ORM.

        .. note:: This function may be used to compute default values.
        """
        # microseconds must be annihilated as they don't comply with the server datetime format
        return datetime.now().replace(microsecond=0)

    @staticmethod
    def today(*args) -> datetime:
        """Return the current day, at midnight (00:00:00)."""
        return Datetime.now().replace(hour=0, minute=0, second=0)

    @staticmethod
    def context_timestamp(record: BaseModel, timestamp: datetime) -> datetime:
        """Return the given timestamp converted to the client's timezone.

        .. note:: This method is *not* meant for use as a default initializer,
            because datetime fields are automatically converted upon
            display on client side. For default values, :meth:`now`
            should be used instead.

        :param record: recordset from which the timezone will be obtained.
        :param datetime timestamp: naive datetime value (expressed in UTC)
            to be converted to the client timezone.
        :return: timestamp converted to timezone-aware datetime in context timezone.
        :rtype: datetime
        """
        assert isinstance(timestamp, datetime), 'Datetime instance expected'
        tz = record.env.tz
        utc_timestamp = pytz.utc.localize(timestamp, is_dst=False)  # UTC = no DST
        timestamp = utc_timestamp.astimezone(tz)
        return timestamp

    @staticmethod
    def to_datetime(value) -> datetime | None:
        """Convert an ORM ``value`` into a :class:`datetime` value.

        :param value: value to convert.
        :type value: str or date or datetime
        :return: an object representing ``value``.
        :raises ValueError: if ``value`` is a timezone-aware datetime
            (the ORM only handles naive datetimes expressed in UTC).
        """
        if not value:
            return None
        if isinstance(value, date):
            if isinstance(value, datetime):
                if value.tzinfo:
                    raise ValueError("Datetime field expects a naive datetime: %s" % value)
                return value
            # a plain date is extended to midnight
            return datetime.combine(value, time.min)
        # TODO: fix data files
        # Truncate DATETIME_FORMAT to match shorter strings (e.g. a bare
        # 'YYYY-MM-DD'): each value field is 2 chars wide except the 4-char
        # year, so format length == len(value) - 2 for every valid prefix.
        return datetime.strptime(value, DATETIME_FORMAT[:len(value)-2])

    # kept for backwards compatibility, but consider `from_string` as deprecated, will probably
    # be removed after V12
    from_string = to_datetime

    @staticmethod
    def to_string(value: datetime | typing.Literal[False]) -> str | typing.Literal[False]:
        """Convert a :class:`datetime` or :class:`date` object to a string.

        :param value: value to convert.
        :type value: datetime or date
        :return: a string representing ``value`` in the server's datetime format,
            if ``value`` is of type :class:`date`,
            the time portion will be midnight (00:00:00).
        """
        return value.strftime(DATETIME_FORMAT) if value else False

    def expression_getter(self, field_expr: str) -> Callable[[BaseModel], typing.Any]:
        # Build a getter for `field_expr`, optionally extracting a datetime
        # property (e.g. 'field.year') after shifting to the context timezone.
        if field_expr == self.name:
            return self.__get__
        _fname, property_name = parse_field_expr(field_expr)
        get_property = self._expression_property_getter(property_name)
        def getter(record):
            dt = self.__get__(record)
            if not dt:
                return False
            if (tz := record.env.context.get('tz')) and tz in pytz.all_timezones_set:
                # only use the timezone from the context
                dt = dt.astimezone(pytz.timezone(tz))
            return get_property(dt)
        return getter

    def convert_to_cache(self, value, record, validate=True):
        return self.to_datetime(value)

    def convert_to_export(self, value, record):
        # export the value as seen in the client's timezone: format it for
        # display first, then parse the (tz-shifted) string back
        value = self.convert_to_display_name(value, record)
        return self.to_datetime(value) or ''

    def convert_to_display_name(self, value, record):
        if not value:
            return False
        return Datetime.to_string(Datetime.context_timestamp(record, value))

View file

@ -0,0 +1,773 @@
from __future__ import annotations
import collections.abc
import logging
import typing
from collections import defaultdict
from difflib import get_close_matches, unified_diff
from hashlib import sha256
from operator import attrgetter
from markupsafe import Markup
from markupsafe import escape as markup_escape
from psycopg2.extras import Json as PsycopgJson
from odoo.exceptions import AccessError, UserError
from odoo.netsvc import COLOR_PATTERN, DEFAULT, GREEN, RED, ColoredFormatter
from odoo.tools import SQL, html_normalize, html_sanitize, html2plaintext, is_html_empty, plaintext2html, sql
from odoo.tools.misc import OrderedSet, SENTINEL, Sentinel
from odoo.tools.sql import pattern_to_translated_trigram_pattern, pg_varchar, value_to_translated_trigram_pattern
from odoo.tools.translate import html_translate
from .fields import Field, _logger
from .utils import COLLECTION_TYPES, SQL_OPERATORS
if typing.TYPE_CHECKING:
from .models import BaseModel
from odoo.tools import Query
if typing.TYPE_CHECKING:
from collections.abc import Callable
class BaseString(Field[str | typing.Literal[False]]):
    """ Abstract class for string fields.

    Implements the ``translate`` attribute: when set, values are stored in
    the database as JSONB mappings ``{lang_code: value}`` (see
    :meth:`convert_to_column_insert`) and the cache holds a per-language
    dict accessed through a :class:`LangProxyDict` view.
    """
    translate: bool | Callable[[Callable[[str], str], str], str] = False # whether the field is translated
    size = None # maximum size of values (deprecated)
    is_text = True
    falsy_value = ''

    def __init__(self, string: str | Sentinel = SENTINEL, **kwargs):
        # translate is either True, False, or a callable
        if 'translate' in kwargs and not callable(kwargs['translate']):
            kwargs['translate'] = bool(kwargs['translate'])
        super().__init__(string=string, **kwargs)

    _related_translate = property(attrgetter('translate'))

    def _description_translate(self, env):
        return bool(self.translate)

    def setup_related(self, model):
        super().setup_related(model)
        if self.store and self.translate:
            _logger.warning("Translated stored related field (%s) will not be computed correctly in all languages", self)

    def get_depends(self, model):
        # Translated stored fields must not depend on context: drop (and
        # warn about) any context dependency reported by the base class.
        if self.translate and self.store:
            dep, dep_ctx = super().get_depends(model)
            if dep_ctx:
                _logger.warning("Translated stored fields (%s) cannot depend on context", self)
            return dep, ()
        return super().get_depends(model)

    def _convert_db_column(self, model, column):
        # specialized implementation for converting from/to translated fields
        if self.translate or column['udt_name'] == 'jsonb':
            sql.convert_column_translatable(model.env.cr, model._table, self.name, self.column_type[1])
        else:
            sql.convert_column(model.env.cr, model._table, self.name, self.column_type[1])

    def get_trans_terms(self, value):
        """ Return the sequence of terms to translate found in `value`. """
        if not callable(self.translate):
            return [value] if value else []
        terms = []
        # the callable translate(callback, value) invokes callback per term
        self.translate(terms.append, value)
        return terms

    def get_text_content(self, term):
        """ Return the textual content for the given term. """
        func = getattr(self.translate, 'get_text_content', lambda term: term)
        return func(term)

    def convert_to_column(self, value, record, values=None, validate=True):
        return self.convert_to_cache(value, record, validate)

    def convert_to_column_insert(self, value, record, values=None, validate=True):
        # translated fields are stored as JSONB {lang: value}; initialize
        # both 'en_US' and the current language with the same value
        if self.translate:
            value = self.convert_to_column(value, record, values, validate)
            if value is None:
                return None
            return PsycopgJson({'en_US': value, record.env.lang or 'en_US': value})
        return super().convert_to_column_insert(value, record, values, validate)

    def get_column_update(self, record):
        # for translated fields, write the whole {lang: value} dict from cache
        if self.translate:
            assert self not in record.env._field_depends_context, f"translated field {self} cannot depend on context"
            value = record.env.transaction.field_data[self][record.id]
            return PsycopgJson(value) if value else None
        return super().get_column_update(record)

    def convert_to_cache(self, value, record, validate=True):
        # normalize to str, truncate to `size`, and let a callable translate
        # validate/normalize terms when requested
        if value is None or value is False:
            return None
        if isinstance(value, bytes):
            s = value.decode()
        else:
            s = str(value)
        value = s[:self.size]
        if validate and callable(self.translate):
            # pylint: disable=not-callable
            value = self.translate(lambda t: None, value)
        return value

    def convert_to_record(self, value, record):
        # select the value for the current language; in 'edit_translations'
        # mode, wrap each translatable term in a <span> with metadata
        if value is None:
            return False
        if not self.translate:
            return value
        if isinstance(value, dict):
            lang = self.translation_lang(record.env)
            # raise a KeyError for the __get__ function
            value = value[lang]
        if (
            callable(self.translate)
            and record.env.context.get('edit_translations')
            and self.get_trans_terms(value)
        ):
            base_lang = record._get_base_lang()
            lang = record.env.lang or 'en_US'
            if lang != base_lang:
                base_value = record.with_context(edit_translations=None, check_translations=True, lang=base_lang)[self.name]
                base_terms_iter = iter(self.get_trans_terms(base_value))
                get_base = lambda term: next(base_terms_iter)
            else:
                get_base = lambda term: term
            delay_translation = value != record.with_context(edit_translations=None, check_translations=None, lang=lang)[self.name]
            # use a wrapper to let the frontend js code identify each term and
            # its metadata in the 'edit_translations' context
            def translate_func(term):
                source_term = get_base(term)
                translation_state = 'translated' if lang == base_lang or source_term != term else 'to_translate'
                translation_source_sha = sha256(source_term.encode()).hexdigest()
                return (
                    '<span '
                    f'''{'class="o_delay_translation" ' if delay_translation else ''}'''
                    f'data-oe-model="{markup_escape(record._name)}" '
                    f'data-oe-id="{markup_escape(record.id)}" '
                    f'data-oe-field="{markup_escape(self.name)}" '
                    f'data-oe-translation-state="{translation_state}" '
                    f'data-oe-translation-source-sha="{translation_source_sha}"'
                    '>'
                    f'{term}'
                    '</span>'
                )
            # pylint: disable=not-callable
            value = self.translate(translate_func, value)
        return value

    def convert_to_write(self, value, record):
        return value

    def get_translation_dictionary(self, from_lang_value, to_lang_values):
        """ Build a dictionary from terms in from_lang_value to terms in to_lang_values

        :param str from_lang_value: from xml/html
        :param dict to_lang_values: {lang: lang_value}
        :return: {from_lang_term: {lang: lang_term}}
        :rtype: dict
        """
        from_lang_terms = self.get_trans_terms(from_lang_value)
        dictionary = defaultdict(lambda: defaultdict(dict))
        if not from_lang_terms:
            return dictionary
        dictionary.update({from_lang_term: defaultdict(dict) for from_lang_term in from_lang_terms})
        for lang, to_lang_value in to_lang_values.items():
            to_lang_terms = self.get_trans_terms(to_lang_value)
            # when term counts diverge, the mapping is unreliable: fall back
            # to identity (untranslated) for that language
            if len(from_lang_terms) != len(to_lang_terms):
                for from_lang_term in from_lang_terms:
                    dictionary[from_lang_term][lang] = from_lang_term
            else:
                for from_lang_term, to_lang_term in zip(from_lang_terms, to_lang_terms):
                    dictionary[from_lang_term][lang] = to_lang_term
        return dictionary

    def _get_stored_translations(self, record):
        """ Fetch the raw JSONB value of the field for ``record``.

        :return: e.g. ``{'en_US': 'value_en_US', 'fr_FR': 'French'}``, or
            ``None`` when the row does not exist
        """
        # assert (self.translate and self.store and record)
        record.flush_recordset([self.name])
        cr = record.env.cr
        cr.execute(SQL(
            "SELECT %s FROM %s WHERE id = %s",
            SQL.identifier(self.name),
            SQL.identifier(record._table),
            record.id,
        ))
        res = cr.fetchone()
        return res[0] if res else None

    def translation_lang(self, env):
        # translate=True uses the plain context lang; callable translate may
        # use an '_'-prefixed (delayed) lang from the environment
        return (env.lang or 'en_US') if self.translate is True else env._lang

    def get_translation_fallback_langs(self, env):
        # Ordered language codes to COALESCE over when reading the JSONB
        # column; '_'-prefixed codes are delayed-translation variants.
        lang = self.translation_lang(env)
        if lang == '_en_US':
            return '_en_US', 'en_US'
        if lang == 'en_US':
            return ('en_US',)
        if lang.startswith('_'):
            return lang, lang[1:], '_en_US', 'en_US'
        return lang, 'en_US'

    def _get_cache_impl(self, env):
        # expose the {id: {lang: value}} cache as {id: value} for the
        # current language, unless all languages are being prefetched
        cache = super()._get_cache_impl(env)
        if not self.translate or env.context.get('prefetch_langs'):
            return cache
        lang = self.translation_lang(env)
        return LangProxyDict(self, cache, lang)

    def _cache_missing_ids(self, records):
        if self.translate and records.env.context.get('prefetch_langs'):
            # we always need to fetch the current language in the cache
            records = records.with_context(prefetch_langs=False)
        return super()._cache_missing_ids(records)

    def _to_prefetch(self, record):
        if self.translate and record.env.context.get('prefetch_langs'):
            # we always need to fetch the current language in the cache
            return super()._to_prefetch(record.with_context(prefetch_langs=False)).with_env(record.env)
        return super()._to_prefetch(record)

    def _insert_cache(self, records, values):
        # Populate the raw {id: {lang: value}} cache; `values` are JSONB
        # dicts (prefetch_langs) or plain per-language values.
        if not self.translate:
            super()._insert_cache(records, values)
            return
        assert self not in records.env._field_depends_context, f"translated field {self} cannot depend on context"
        env = records.env
        field_cache = env.transaction.field_data[self]
        if env.context.get('prefetch_langs'):
            installed = [lang for lang, _ in env['res.lang'].get_installed()]
            langs = OrderedSet[str](installed + ['en_US'])
            u_langs: list[str] = [f'_{lang}' for lang in langs] if self.translate is not True and env._lang.startswith('_') else []
            for id_, val in zip(records._ids, values):
                if val is None:
                    field_cache.setdefault(id_, None)
                else:
                    if u_langs:  # fallback missing _lang to lang if exists
                        val.update({f'_{k}': v for k, v in val.items() if k in langs and f'_{k}' not in val})
                    field_cache[id_] = {
                        **dict.fromkeys(langs, val['en_US']),  # fallback missing lang to en_US
                        **dict.fromkeys(u_langs, val.get('_en_US')),  # fallback missing _lang to _en_US
                        **val
                    }
        else:
            lang = self.translation_lang(env)
            for id_, val in zip(records._ids, values):
                if val is None:
                    field_cache.setdefault(id_, None)
                else:
                    cache_value = field_cache.setdefault(id_, {})
                    if cache_value is not None:
                        cache_value.setdefault(lang, val)

    def _update_cache(self, records, cache_value, dirty=False):
        if self.translate and cache_value is not None and records.env.context.get('prefetch_langs'):
            assert isinstance(cache_value, dict), f"invalid cache value for {self}"
            if len(records) > 1:
                # new dict for each record
                for record in records:
                    super()._update_cache(record, dict(cache_value), dirty)
                return
        super()._update_cache(records, cache_value, dirty)

    def write(self, records, value):
        """ Write ``value`` on ``records``, maintaining per-language values
        for translated fields (whole-value or term-by-term translation). """
        if not self.translate or value is False or value is None:
            super().write(records, value)
            return
        cache_value = self.convert_to_cache(value, records)
        records = self._filter_not_equal(records, cache_value)
        if not records:
            return
        field_cache = self._get_cache(records.env)
        dirty_ids = records.env._field_dirty.get(self, ())
        # flush dirty None values
        dirty_records = records.filtered(lambda rec: rec.id in dirty_ids)
        if any(field_cache.get(record_id, SENTINEL) is None for record_id in dirty_records._ids):
            dirty_records.flush_recordset([self.name])
        dirty = self.store and any(records._ids)
        lang = self.translation_lang(records.env)
        # not dirty fields
        if not dirty:
            if self.compute and self.inverse:
                # invalidate the values in other languages to force their recomputation
                self._update_cache(records.with_context(prefetch_langs=True), {lang: cache_value}, dirty=False)
            else:
                self._update_cache(records, cache_value, dirty=False)
            return
        # model translation
        if not callable(self.translate):
            # invalidate clean fields because them may contain fallback value
            clean_records = records.filtered(lambda rec: rec.id not in dirty_ids)
            clean_records.invalidate_recordset([self.name])
            self._update_cache(records, cache_value, dirty=True)
            if lang != 'en_US' and not records.env['res.lang']._get_data(code='en_US'):
                # if 'en_US' is not active, we always write en_US to make sure value_en is meaningful
                self._update_cache(records.with_context(lang='en_US'), cache_value, dirty=True)
            return
        # model term translation
        new_translations_list = []
        new_terms = set(self.get_trans_terms(cache_value))
        delay_translations = records.env.context.get('delay_translations')
        for record in records:
            # shortcut when no term needs to be translated
            if not new_terms:
                new_translations_list.append({'en_US': cache_value, lang: cache_value})
                continue
            # _get_stored_translations can be refactored and prefetches translations for multi records,
            # but it is really rare to write the same non-False/None/no-term value to multi records
            stored_translations = self._get_stored_translations(record)
            if not stored_translations:
                new_translations_list.append({'en_US': cache_value, lang: cache_value})
                continue
            old_translations = {
                k: stored_translations.get(f'_{k}', v)
                for k, v in stored_translations.items()
                if not k.startswith('_')
            }
            from_lang_value = old_translations.pop(lang, old_translations['en_US'])
            translation_dictionary = self.get_translation_dictionary(from_lang_value, old_translations)
            text2terms = defaultdict(list)
            for term in new_terms:
                if term_text := self.get_text_content(term):
                    text2terms[term_text].append(term)
            is_text = self.translate.is_text if hasattr(self.translate, 'is_text') else lambda term: True
            term_adapter = self.translate.term_adapter if hasattr(self.translate, 'term_adapter') else None
            # remap old terms to close-matching new terms (>= 0.9 similarity)
            # so existing translations survive small edits of the source text
            for old_term in list(translation_dictionary.keys()):
                if old_term not in new_terms:
                    old_term_text = self.get_text_content(old_term)
                    matches = get_close_matches(old_term_text, text2terms, 1, 0.9)
                    if matches:
                        closest_term = get_close_matches(old_term, text2terms[matches[0]], 1, 0)[0]
                        if closest_term in translation_dictionary:
                            continue
                        old_is_text = is_text(old_term)
                        closest_is_text = is_text(closest_term)
                        if old_is_text or not closest_is_text:
                            if not closest_is_text and records.env.context.get("install_mode") and lang == 'en_US' and term_adapter:
                                adapter = term_adapter(closest_term)
                                if adapter(old_term) is None:  # old term and closest_term have different structures
                                    continue
                                translation_dictionary[closest_term] = {k: adapter(v) for k, v in translation_dictionary.pop(old_term).items()}
                            else:
                                translation_dictionary[closest_term] = translation_dictionary.pop(old_term)
            # pylint: disable=not-callable
            new_translations = {
                l: self.translate(lambda term: translation_dictionary.get(term, {l: None})[l], cache_value)
                for l in old_translations.keys()
            }
            if delay_translations:
                new_store_translations = stored_translations
                new_store_translations.update({f'_{k}': v for k, v in new_translations.items()})
                new_store_translations.pop(f'_{lang}', None)
            else:
                new_store_translations = new_translations
            new_store_translations[lang] = cache_value
            if not records.env['res.lang']._get_data(code='en_US'):
                new_store_translations['en_US'] = cache_value
                new_store_translations.pop('_en_US', None)
            new_translations_list.append(new_store_translations)
        for record, new_translation in zip(records.with_context(prefetch_langs=True), new_translations_list, strict=True):
            self._update_cache(record, new_translation, dirty=True)

    def to_sql(self, model: BaseModel, alias: str) -> SQL:
        # read the JSONB value with per-language fallback (COALESCE), unless
        # all languages are prefetched
        sql_field = super().to_sql(model, alias)
        if self.translate and not model.env.context.get('prefetch_langs'):
            langs = self.get_translation_fallback_langs(model.env)
            sql_field_langs = [SQL("%s->>%s", sql_field, lang) for lang in langs]
            if len(sql_field_langs) == 1:
                return sql_field_langs[0]
            return SQL("COALESCE(%s)", SQL(", ").join(sql_field_langs))
        return sql_field

    def expression_getter(self, field_expr):
        if field_expr != 'display_name.no_error':
            return super().expression_getter(field_expr)
        # when searching by display_name, don't raise AccessError but return an
        # empty value instead
        get_display_name = super().expression_getter('display_name')
        def getter(record):
            try:
                return get_display_name(record)
            except AccessError:
                return ''
        return getter

    def condition_to_sql(self, field_expr: str, operator: str, value, model: BaseModel, alias: str, query: Query) -> SQL:
        # build the condition
        if self.translate and model.env.context.get('prefetch_langs'):
            model = model.with_context(prefetch_langs=False)
        base_condition = super().condition_to_sql(field_expr, operator, value, model, alias, query)
        # faster SQL for index trigrams
        if (
            self.translate
            and value
            and operator in ('in', 'like', 'ilike', '=like', '=ilike')
            and self.index == 'trigram'
            and model.pool.has_trigram
            and (
                isinstance(value, str)
                or (isinstance(value, COLLECTION_TYPES) and all(isinstance(v, str) for v in value))
            )
        ):
            # a prefilter using trigram index to speed up '=', 'like', 'ilike'
            # '!=', '<=', '<', '>', '>=', 'in', 'not in', 'not like', 'not ilike' cannot use this trick
            if operator == 'in' and len(value) == 1:
                value = value_to_translated_trigram_pattern(next(iter(value)))
            elif operator != 'in':
                value = pattern_to_translated_trigram_pattern(value)
            else:
                value = '%'
            if value == '%':
                # pattern matches everything: prefilter would not help
                return base_condition
            raw_sql_field = self.to_sql(model.with_context(prefetch_langs=True), alias)
            # match against all language values at once
            sql_left = SQL("jsonb_path_query_array(%s, '$.*')::text", raw_sql_field)
            sql_operator = SQL_OPERATORS['like' if operator == 'in' else operator]
            sql_right = SQL("%s", self.convert_to_column(value, model, validate=False))
            unaccent = model.env.registry.unaccent
            return SQL(
                "(%s%s%s AND %s)",
                unaccent(sql_left),
                sql_operator,
                unaccent(sql_right),
                base_condition,
            )
        return base_condition
class Char(BaseString):
    """ Single-line string field, optionally limited in length.

    :param int size: the maximum size of values stored for that field
    :param bool trim: whether the value is trimmed (``True`` by default).
        The trim operation is applied both by the web client (on user input
        during write/create flows in the UI) and by the server during import
        (in `base_import`), so imported data and UI-entered data behave
        consistently.
    :param translate: enable the translation of the field's values; use
        ``translate=True`` to translate field values as a whole; ``translate``
        may also be a callable such that ``translate(callback, value)``
        translates ``value`` by using ``callback(term)`` to retrieve the
        translation of terms.
    :type translate: bool or callable
    """
    type = 'char'
    trim: bool = True  # whether value is trimmed (only by web client and base_import)

    def _setup_attrs__(self, model_class, name):
        super()._setup_attrs__(model_class, name)
        assert self.size is None or isinstance(self.size, int), \
            "Char field %s with non-integer size %r" % (self, self.size)

    @property
    def _column_type(self):
        # varchar, with a length limit derived from self.size (if any)
        return ('varchar', pg_varchar(self.size))

    def update_db_column(self, model, column):
        # Widen the existing varchar column when its current limit is smaller
        # than the declared size (or when the size became unlimited).
        if column and self.column_type[0] == 'varchar' and column['udt_name'] == 'varchar':
            current_limit = column['character_maximum_length']
            if current_limit and (self.size is None or current_limit < self.size):
                sql.convert_column(model.env.cr, model._table, self.name, self.column_type[1])
        super().update_db_column(model, column)

    _related_size = property(attrgetter('size'))
    _related_trim = property(attrgetter('trim'))
    _description_size = property(attrgetter('size'))
    _description_trim = property(attrgetter('trim'))

    def get_depends(self, model):
        depends, depends_context = super().get_depends(model)
        # display_name may depend on context['lang'] (`test_lp1071710`)
        if self.name != 'display_name' or 'lang' in depends_context:
            return depends, depends_context
        rec_name = model._rec_name
        if self.compute and not self.store and rec_name and model._fields[rec_name].base_field.translate:
            depends_context = [*depends_context, 'lang']
        return depends, depends_context
class Text(BaseString):
    """ Very similar to :class:`Char` but used for longer contents, does not
    have a size and usually displayed as a multiline text box.

    :param translate: enable the translation of the field's values; use
        ``translate=True`` to translate field values as a whole; ``translate``
        may also be a callable such that ``translate(callback, value)``
        translates ``value`` by using ``callback(term)`` to retrieve the
        translation of terms.
    :type translate: bool or callable
    """
    type = 'text'
    # unbounded PostgreSQL 'text' column
    _column_type = ('text', 'text')
class Html(BaseString):
    """ Encapsulates an html code content.

    :param bool sanitize: whether value must be sanitized (default: ``True``)
    :param bool sanitize_overridable: whether the sanitation can be bypassed by
        the users part of the `base.group_sanitize_override` group (default: ``False``)
    :param bool sanitize_tags: whether to sanitize tags
        (only a white list of attributes is accepted, default: ``True``)
    :param bool sanitize_attributes: whether to sanitize attributes
        (only a white list of attributes is accepted, default: ``True``)
    :param bool sanitize_style: whether to sanitize style attributes (default: ``False``)
    :param bool sanitize_conditional_comments: whether to kill conditional comments. (default: ``True``)
    :param bool sanitize_output_method: whether to sanitize using html or xhtml (default: ``html``)
    :param bool strip_style: whether to strip style attributes
        (removed and therefore not sanitized, default: ``False``)
    :param bool strip_classes: whether to strip classes attributes (default: ``False``)
    """
    type = 'html'
    _column_type = ('text', 'text')

    sanitize: bool = True  # whether value must be sanitized
    sanitize_overridable: bool = False  # whether the sanitation can be bypassed by the users part of the `base.group_sanitize_override` group
    sanitize_tags: bool = True  # whether to sanitize tags (only a white list of attributes is accepted)
    sanitize_attributes: bool = True  # whether to sanitize attributes (only a white list of attributes is accepted)
    sanitize_style: bool = False  # whether to sanitize style attributes
    sanitize_form: bool = True  # whether to sanitize forms
    sanitize_conditional_comments: bool = True  # whether to kill conditional comments. Otherwise keep them but with their content sanitized.
    sanitize_output_method: str = 'html'  # whether to sanitize using html or xhtml
    strip_style: bool = False  # whether to strip style attributes (removed and therefore not sanitized)
    strip_classes: bool = False  # whether to strip classes attributes

    def _get_attrs(self, model_class, name):
        # called by _setup_attrs__(), working together with BaseString._setup_attrs__()
        attrs = super()._get_attrs(model_class, name)
        # Shortcut for common sanitize options
        # Outgoing and incoming emails should not be sanitized with the same options.
        # e.g. conditional comments: no need to keep conditional comments for incoming emails,
        # we do not need this Microsoft Outlook client feature for emails displayed Odoo's web client.
        # While we need to keep them in mail templates and mass mailings, because they could be rendered in Outlook.
        if attrs.get('sanitize') == 'email_outgoing':
            attrs['sanitize'] = True
            attrs.update({key: value for key, value in {
                'sanitize_tags': False,
                'sanitize_attributes': False,
                'sanitize_conditional_comments': False,
                'sanitize_output_method': 'xml',
            }.items() if key not in attrs})
        # Translated sanitized html fields must use html_translate or a callable.
        # `elif` intended, because HTML fields with translate=True and sanitize=False
        # where not using `html_translate` before and they must remain without `html_translate`.
        # Otherwise, breaks `--test-tags .test_render_field`, for instance.
        elif attrs.get('translate') is True and attrs.get('sanitize', True):
            attrs['translate'] = html_translate
        return attrs

    _related_sanitize = property(attrgetter('sanitize'))
    _related_sanitize_tags = property(attrgetter('sanitize_tags'))
    _related_sanitize_attributes = property(attrgetter('sanitize_attributes'))
    _related_sanitize_style = property(attrgetter('sanitize_style'))
    _related_strip_style = property(attrgetter('strip_style'))
    _related_strip_classes = property(attrgetter('strip_classes'))

    _description_sanitize = property(attrgetter('sanitize'))
    _description_sanitize_tags = property(attrgetter('sanitize_tags'))
    _description_sanitize_attributes = property(attrgetter('sanitize_attributes'))
    _description_sanitize_style = property(attrgetter('sanitize_style'))
    _description_strip_style = property(attrgetter('strip_style'))
    _description_strip_classes = property(attrgetter('strip_classes'))

    def convert_to_column(self, value, record, values=None, validate=True):
        # sanitize here; skip re-validation in the base conversion
        value = self._convert(value, record, validate=validate)
        return super().convert_to_column(value, record, values, validate=False)

    def convert_to_cache(self, value, record, validate=True):
        return self._convert(value, record, validate)

    def _convert(self, value, record, validate):
        """ Sanitize ``value`` according to the field's sanitize options.

        When ``sanitize_overridable`` is set, members of
        `base.group_sanitize_override` bypass sanitation entirely; for other
        users, raise a :class:`UserError` if the stored value would be
        altered by sanitation (i.e. it was saved by a privileged user).
        """
        if value is None or value is False:
            return None
        if not validate or not self.sanitize:
            return value
        sanitize_vals = {
            'silent': True,
            'sanitize_tags': self.sanitize_tags,
            'sanitize_attributes': self.sanitize_attributes,
            'sanitize_style': self.sanitize_style,
            'sanitize_form': self.sanitize_form,
            'sanitize_conditional_comments': self.sanitize_conditional_comments,
            'output_method': self.sanitize_output_method,
            'strip_style': self.strip_style,
            'strip_classes': self.strip_classes
        }
        if self.sanitize_overridable:
            if record.env.user.has_group('base.group_sanitize_override'):
                return value
            original_value = record[self.name]
            if original_value:
                # Note that sanitize also normalize
                original_value_sanitized = html_sanitize(original_value, **sanitize_vals)
                original_value_normalized = html_normalize(original_value)
                if (
                    not original_value_sanitized  # sanitizer could empty it
                    or original_value_normalized != original_value_sanitized
                ):
                    # The field contains element(s) that would be removed if
                    # sanitized. It means that someone who was part of a group
                    # allowing to bypass the sanitation saved that field
                    # previously.
                    diff = unified_diff(
                        original_value_sanitized.splitlines(),
                        original_value_normalized.splitlines(),
                    )
                    with_colors = isinstance(logging.getLogger().handlers[0].formatter, ColoredFormatter)
                    diff_str = f'The field ({record._description}, {self.string}) will not be editable:\n'
                    for line in list(diff)[2:]:
                        if with_colors:
                            color = {'-': RED, '+': GREEN}.get(line[:1], DEFAULT)
                            diff_str += COLOR_PATTERN % (30 + color, 40 + DEFAULT, line.rstrip() + "\n")
                        else:
                            diff_str += line.rstrip() + '\n'
                    _logger.info(diff_str)
                    raise UserError(record.env._(
                        "The field value you're saving (%(model)s %(field)s) includes content that is "
                        "restricted for security reasons. It is possible that someone "
                        "with higher privileges previously modified it, and you are therefore "
                        "not able to modify it yourself while preserving the content.",
                        model=record._description, field=self.string,
                    ))
        return html_sanitize(value, **sanitize_vals)

    def convert_to_record(self, value, record):
        # expose HTML values as Markup so templates do not re-escape them
        r = super().convert_to_record(value, record)
        if isinstance(r, bytes):
            r = r.decode()
        return r and Markup(r)

    def convert_to_read(self, value, record, use_display_name=True):
        r = super().convert_to_read(value, record, use_display_name)
        if isinstance(r, bytes):
            r = r.decode()
        return r and Markup(r)

    def get_trans_terms(self, value):
        # ensure the translation terms are stringified, otherwise we can break the PO file
        return list(map(str, super().get_trans_terms(value)))

    escape = staticmethod(markup_escape)
    is_empty = staticmethod(is_html_empty)
    to_plaintext = staticmethod(html2plaintext)
    from_plaintext = staticmethod(plaintext2html)
class LangProxyDict(collections.abc.MutableMapping):
    """A per-language view over a cache dict shaped ``{id: {lang: value}}``:
    it behaves as ``{id: value}`` for one fixed language code."""
    __slots__ = ('_cache', '_field', '_lang')

    def __init__(self, field: BaseString, cache: dict, lang: str):
        super().__init__()
        self._field = field
        self._cache = cache
        self._lang = lang

    def _uses_en_fallback(self, key):
        # The field's value is neither computed, nor in database (non-stored
        # field or new record without origin): 'en_US' acts as the fallback
        # value for every other language.
        field = self._field
        return not (field.compute or (field.store and (key or key.origin)))

    def get(self, key, default=None):
        # overridden purely for performance (single lookup in the raw cache)
        per_lang = self._cache.get(key, SENTINEL)
        if per_lang is SENTINEL:
            return default
        if per_lang is None:
            return None
        if self._uses_en_fallback(key):
            return per_lang.get(self._lang, per_lang.get('en_US', default))
        return per_lang.get(self._lang, default)

    def __getitem__(self, key):
        per_lang = self._cache[key]
        if per_lang is None:
            return None
        if self._uses_en_fallback(key):
            return per_lang.get(self._lang, per_lang.get('en_US'))
        return per_lang[self._lang]

    def __setitem__(self, key, value):
        if value is None:
            self._cache[key] = None
            return
        per_lang = self._cache.get(key)
        if per_lang is None:
            # key is not in cache, or {key: None} is in cache
            per_lang = {self._lang: value}
            self._cache[key] = per_lang
        else:
            per_lang[self._lang] = value
        if self._uses_en_fallback(key):
            # make the value the 'en_US' fallback for other languages too
            per_lang.setdefault('en_US', value)

    def __delitem__(self, key):
        per_lang = self._cache.get(key)
        if per_lang:
            per_lang.pop(self._lang, None)

    def __iter__(self):
        lang = self._lang
        for key, per_lang in self._cache.items():
            if per_lang is None or lang in per_lang:
                yield key

    def __len__(self):
        return sum(1 for _ in iter(self))

    def clear(self):
        for per_lang in self._cache.values():
            if per_lang:
                per_lang.pop(self._lang, None)

    def __repr__(self):
        return f"<LangProxyDict lang={self._lang!r} size={len(self._cache)} at {hex(id(self))}>"

View file

@ -0,0 +1,56 @@
import functools
import typing
@functools.total_ordering
class NewId:
    """ Pseudo-ids for new records, encapsulating an optional origin id (actual
    record id) and an optional reference (any value).
    """
    # '__hash' is name-mangled to '_NewId__hash' (private names in __slots__
    # are mangled like regular attributes), hence the RUF023 suppression
    __slots__ = ('origin', 'ref', '__hash')  # noqa: RUF023

    def __init__(self, origin=None, ref=None):
        self.origin = origin
        self.ref = ref
        # precompute the hash once: based on origin, else ref, else identity
        self.__hash = hash(origin or ref or id(self))

    def __bool__(self):
        # a NewId is always falsy, like a missing/unsaved database id
        return False

    def __eq__(self, other):
        # equality relies solely on truthy origin or ref; two anonymous
        # NewIds (no origin, no ref) never compare equal via __eq__ --
        # dict/set lookups still find them through the identity fast-path
        return isinstance(other, NewId) and (
            (self.origin and other.origin and self.origin == other.origin)
            or (self.ref and other.ref and self.ref == other.ref)
        )

    def __hash__(self):
        return self.__hash

    def __lt__(self, other):
        if isinstance(other, NewId):
            # compare NewIds through their origins
            other = other.origin
        if other is None:
            # NOTE(review): when self.origin is truthy this evaluates
            # ``None > self.origin``, which raises TypeError on Python 3 --
            # confirm the intended ordering for origin-less operands
            return other > self.origin if self.origin else False
        if isinstance(other, int):
            return bool(self.origin) and self.origin < other
        return NotImplemented

    def __repr__(self):
        return (
            "<NewId origin=%r>" % self.origin if self.origin else
            "<NewId ref=%r>" % self.ref if self.ref else
            "<NewId 0x%x>" % id(self)
        )

    def __str__(self):
        if self.origin or self.ref:
            id_part = repr(self.origin or self.ref)
        else:
            id_part = hex(id(self))
        return "NewId_%s" % id_part
# By default, in the ORM we initialize it as an int, but any type should work.
# However, some parts of the ORM may assume it is an integer.
# Non-exhaustive list: relational fields, references, hierarchies, etc.
IdType: typing.TypeAlias = int | NewId | str

View file

@ -0,0 +1,626 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from __future__ import annotations
import logging
import typing
from collections import defaultdict
from types import MappingProxyType
from . import models
from . import fields # must be imported after models
from .utils import check_pg_name
from odoo.exceptions import ValidationError
from odoo.tools import (
OrderedSet,
LastOrderedSet,
discardattr,
frozendict,
sql,
)
from odoo.tools.translate import FIELD_TRANSLATE
if typing.TYPE_CHECKING:
from odoo.api import Environment
from odoo.fields import Field
from odoo.models import BaseModel
from odoo.modules.registry import Registry
_logger = logging.getLogger('odoo.registry')
# THE MODEL DEFINITIONS, MODEL CLASSES, AND MODEL INSTANCES
#
# The framework deals with two kinds of classes for models: the "model
# definitions" and the "model classes".
#
# The "model definitions" are the classes defined in modules source code: they
# define models and extend them. Those classes are essentially "static", for
# whatever that means in Python. The only exception is custom models: their
# model definition is created dynamically.
#
# The "model classes" are the ones you find in the registry. The recordsets of
# a model actually are instances of its model class. The "model class" of a
# model is created dynamically when the registry is built. It inherits (in the
# Python sense) from all the model definitions of the model, and possibly other
# model classes (when the model inherits from another model). It also carries
# model metadata inferred from its parent classes.
#
#
# THE MODEL CLASSES
#
# In the simplest case, a model class inherits from all the classes that define
# the model in a flat hierarchy. Consider the definitions of model 'a' below.
# The model class of 'a' inherits from the model definitions A1, A2, A3, in
# reverse order, to match the expected overriding order. The model class
# carries inferred metadata that is shared between all the recordsets of that
# model for a given registry.
#
# class A(Model): # A1 Model
# _name = 'a' / | \
# A3 A2 A1 <- model definitions
# class A(Model): # A2 \ | /
# _inherit = 'a' a <- model class: registry['a']
# |
# class A(Model): # A3 records <- model instances, like env['a']
# _inherit = 'a'
#
# Note that when the model inherits from another model, we actually make the
# model classes inherit from each other, so that extensions to an inherited
# model are visible in the model class of the child model, like in the
# following example.
#
# class A(Model): # A1
# _name = 'a' Model
# / / \ \
# class B(Model): # B1 / / \ \
# _name = 'b' / A2 A1 \
# B2 \ / B1
# class B(Model): # B2 \ \ / /
# _inherit = ['a', 'b'] \ a /
# \ | /
# class A(Model): # A2 \ | /
# _inherit = 'a' b
#
# To be more explicit, the parent classes of model 'a' are (A2, A1), and the
# ones of model 'b' are (B2, a, B1). Consequently, the MRO of model 'a' is
# [a, A2, A1, Model] while the MRO of 'b' is [b, B2, a, A2, A1, B1, Model].
#
#
# THE FIELDS OF A MODEL
#
# The fields of a model are given by the model's definitions, inherited models
# ('_inherit' and '_inherits') and other parties, like custom fields. Note that
# a field can be partially overridden when it appears on several definitions of
# its model. In that case, the field's final definition depends on the
# presence or absence of each model definition, which itself depends on the
# modules loaded in the registry.
#
# By design, the model class has access to all the fields on its model
# definitions. When possible, the field is used directly from its model
# definition. There are a number of cases where the field cannot be used
# directly:
# - the field is related (and bits may not be shared);
# - the field is overridden on model definitions;
# - the field is defined on another model (and accessible by mixin).
#
# The last case prevents sharing the field across registries, because the field
# object is specific to a model, and is used as a key in several key
# dictionaries, like the record cache and pending computations.
#
# Setting up a field on its model definition helps saving memory and time.
# Indeed, when sharing is possible, the field's setup is almost entirely done
# where the field was defined. It is thus done when the model definition was
# created, and it may be reused across registries.
#
# In the example below, the field 'foo' appears once on its model definition.
# Assuming that it is not related, that field can be set up directly on its
# model definition. If the model appears in several registries, the
# field 'foo' is effectively shared across registries.
#
# class A1(Model): Model
# _name = 'a' / \
# foo = ... / \
# bar = ... A2 A1
# bar foo, bar
# class A2(Model): \ /
# _inherit = 'a' \ /
# bar = ... a
# bar
#
# On the other hand, the field 'bar' is overridden in its model definitions. In
# that case, the framework recreates the field on the model class, which is
# never shared across registries. The field's setup will be based on its
# definitions, and will thus not be shared across registries.
#
# The so-called magic fields ('id', 'display_name', ...) used to be added on
# model classes. But doing so prevents them from being shared. So instead,
# we add them on definition classes that define a model without extending it.
# This increases the number of fields that are shared across registries.
def is_model_definition(cls: type) -> bool:
    """ Return whether ``cls`` is a model definition class (a MetaModel
    instance that has not been bound to a registry). """
    if not isinstance(cls, models.MetaModel):
        return False
    return getattr(cls, 'pool', None) is None
def is_model_class(cls: type) -> bool:
    """ Return whether ``cls`` is a model registry class (i.e. it carries a
    non-None ``pool`` attribute binding it to a registry). """
    pool = getattr(cls, 'pool', None)
    return pool is not None
def add_to_registry(registry: Registry, model_def: type[BaseModel]) -> type[BaseModel]:
    """ Add a model definition to the given registry, and return its
    corresponding model class. This function creates or extends a model class
    for the given model definition.

    :param registry: the registry being built
    :param model_def: a model definition class (see :func:`is_model_definition`)
    :return: the model class registered under ``model_def._name``
    :raises TypeError: if the definition extends or inherits from a model
        missing from the registry, or violates the abstract/transient
        extension rules
    """
    assert is_model_definition(model_def)

    # warn about legacy attributes that the framework no longer honours
    if hasattr(model_def, '_constraints'):
        _logger.warning("Model attribute '_constraints' is no longer supported, "
                        "please use @api.constrains on methods instead.")
    if hasattr(model_def, '_sql_constraints'):
        _logger.warning("Model attribute '_sql_constraints' is no longer supported, "
                        "please define model.Constraint on the model.")

    # all models except 'base' implicitly inherit from 'base'
    name = model_def._name
    parent_names = list(model_def._inherit)
    if name != 'base':
        parent_names.append('base')

    # create or retrieve the model's class
    if name in parent_names:
        # extension of an existing model: reuse its registry class
        if name not in registry:
            raise TypeError(f"Model {name!r} does not exist in registry.")
        model_cls = registry[name]
        _check_model_extension(model_cls, model_def)
    else:
        # new model: build a fresh registry class deriving from model_def
        model_cls = type(name, (model_def,), {
            'pool': registry,  # this makes it a model class
            '_name': name,
            '_register': False,
            '_original_module': model_def._module,
            '_inherit_module': {},  # map parent to introducing module
            '_inherit_children': OrderedSet(),  # names of children models
            '_inherits_children': set(),  # names of children models
            '_fields__': {},  # populated in _setup()
            '_table_objects': frozendict(),  # populated in _setup()
        })
        # read-only public view over the private mutable _fields__ dict
        model_cls._fields = MappingProxyType(model_cls._fields__)

    # determine all the classes the model should inherit from
    bases = LastOrderedSet([model_def])
    for parent_name in parent_names:
        if parent_name not in registry:
            raise TypeError(f"Model {name!r} inherits from non-existing model {parent_name!r}.")
        parent_cls = registry[parent_name]
        if parent_name == name:
            # extension of the same model: keep its existing bases
            for base in parent_cls._base_classes__:
                bases.add(base)
        else:
            _check_model_parent_extension(model_cls, model_def, parent_cls)
            bases.add(parent_cls)
            model_cls._inherit_module[parent_name] = model_def._module
            parent_cls._inherit_children.add(name)

    # model_cls.__bases__ must be assigned those classes; however, this
    # operation is quite slow, so we do it once in method _prepare_setup()
    model_cls._base_classes__ = tuple(bases)

    # determine the attributes of the model's class
    _init_model_class_attributes(model_cls)

    check_pg_name(model_cls._table)

    # Transience
    if model_cls._transient and not model_cls._log_access:
        raise TypeError(
            "TransientModels must have log_access turned on, "
            "in order to implement their vacuum policy"
        )

    # update the registry after all checks have passed
    registry[name] = model_cls

    # mark all impacted models for setup
    for model_name in registry.descendants([name], '_inherit', '_inherits'):
        registry[model_name]._setup_done__ = False

    return model_cls
def _check_model_extension(model_cls: type[BaseModel], model_def: type[BaseModel]):
""" Check whether ``model_cls`` can be extended with ``model_def``. """
if model_cls._abstract and not model_def._abstract:
raise TypeError(
f"{model_def} transforms the abstract model {model_cls._name!r} into a non-abstract model. "
"That class should either inherit from AbstractModel, or set a different '_name'."
)
if model_cls._transient != model_def._transient:
if model_cls._transient:
raise TypeError(
f"{model_def} transforms the transient model {model_cls._name!r} into a non-transient model. "
"That class should either inherit from TransientModel, or set a different '_name'."
)
else:
raise TypeError(
f"{model_def} transforms the model {model_cls._name!r} into a transient model. "
"That class should either inherit from Model, or set a different '_name'."
)
def _check_model_parent_extension(model_cls: type[BaseModel], model_def: type[BaseModel], parent_cls: type[BaseModel]):
""" Check whether ``model_cls`` can inherit from ``parent_cls``. """
if model_cls._abstract and not parent_cls._abstract:
raise TypeError(
f"In {model_def}, abstract model {model_cls._name!r} cannot inherit from non-abstract model {parent_cls._name!r}."
)
def _init_model_class_attributes(model_cls: type[BaseModel]):
    """ Initialize model class attributes.

    (Re)computes ``_description``, ``_table``, ``_log_access``, ``_inherits``
    and ``_depends`` from the model's base classes, then recursively
    propagates the recomputation to all models inheriting from this one.
    """
    assert is_model_class(model_cls)
    # defaults, overridden below by the values found on definition classes
    model_cls._description = model_cls._name
    model_cls._table = model_cls._name.replace('.', '_')
    model_cls._log_access = model_cls._auto
    inherits = {}
    depends = {}

    # accumulate attributes from base classes, in override order
    for base in reversed(model_cls._base_classes__):
        if is_model_definition(base):
            # the following attributes are not taken from registry classes
            if model_cls._name not in base._inherit and not base._description:
                _logger.warning("The model %s has no _description", model_cls._name)
            model_cls._description = base._description or model_cls._description
            model_cls._table = base._table or model_cls._table
            model_cls._log_access = getattr(base, '_log_access', model_cls._log_access)
            inherits.update(base._inherits)
            for mname, fnames in base._depends.items():
                depends.setdefault(mname, []).extend(fnames)

    # avoid assigning an empty dict to save memory
    if inherits:
        model_cls._inherits = inherits
    if depends:
        model_cls._depends = depends

    # update _inherits_children of parent models
    registry = model_cls.pool
    for parent_name in model_cls._inherits:
        registry[parent_name]._inherits_children.add(model_cls._name)

    # recompute attributes of _inherit_children models
    for child_name in model_cls._inherit_children:
        _init_model_class_attributes(registry[child_name])
def setup_model_classes(env: Environment):
    """ Set up all the model classes of ``env.registry``: add manual models,
    resolve base classes and fields on every model, then run each model's
    post-setup hook.
    """
    registry = env.registry
    # we must setup ir.model before adding manual fields because _add_manual_models may
    # depend on behavior that is implemented through overrides, such as is_mail_thread which
    # is implemented through an override to env['ir.model']._instanciate_attrs
    _prepare_setup(registry['ir.model'])
    # add manual models
    if registry._init_modules:
        _add_manual_models(env)
    # prepare the setup on all models
    models_classes = list(registry.values())
    for model_cls in models_classes:
        _prepare_setup(model_cls)
    # do the actual setup: first resolve all fields on all models, then run
    # the field-level setup, as fields may refer to other models' fields
    for model_cls in models_classes:
        _setup(model_cls, env)
    for model_cls in models_classes:
        _setup_fields(model_cls, env)
    for model_cls in models_classes:
        # instantiate an empty recordset to run the post-setup hook
        model_cls(env, (), ())._post_model_setup__()
def _prepare_setup(model_cls: type[BaseModel]):
    """ Prepare the setup of the model.

    Re-assigns the class bases when needed and resets the attributes that
    :func:`_setup` and :func:`_setup_fields` recompute.
    """
    if model_cls._setup_done__:
        # already set up: the bases must be consistent
        assert model_cls.__bases__ == model_cls._base_classes__
        return

    # changing base classes is costly, do it only when necessary
    if model_cls.__bases__ != model_cls._base_classes__:
        model_cls.__bases__ = model_cls._base_classes__

    # reset those attributes on the model's class for _setup_fields() below
    for attr in ('_rec_name', '_active_name'):
        discardattr(model_cls, attr)

    # reset properties memoized on model_cls
    model_cls._constraint_methods = models.BaseModel._constraint_methods
    model_cls._ondelete_methods = models.BaseModel._ondelete_methods
    model_cls._onchange_methods = models.BaseModel._onchange_methods
def _setup(model_cls: type[BaseModel], env: Environment):
    """ Determine all the fields of the model.

    Collects the field definitions from the model's classes, applies
    database-driven patches (``translate`` / ``company_dependent``), adds
    manual and inherited fields, then determines ``_rec_name``,
    ``_active_name`` and the model's table objects.
    """
    if model_cls._setup_done__:
        return

    # the classes that define this model, i.e., the ones that are not
    # registry classes; the purpose of this attribute is to behave as a
    # cache of [c for c in model_cls.mro() if not is_model_class(c))], which
    # is heavily used in function fields.resolve_mro()
    model_cls._model_classes__ = tuple(c for c in model_cls.mro() if getattr(c, 'pool', None) is None)

    # 1. determine the proper fields of the model: the fields defined on the
    # class and magic fields, not the inherited or custom ones

    # retrieve fields from parent classes, and duplicate them on model_cls to
    # avoid clashes with inheritance between different models
    for name in model_cls._fields:
        discardattr(model_cls, name)
    model_cls._fields__.clear()

    # collect the definitions of each field (base definition + overrides)
    definitions = defaultdict(list)
    for cls in reversed(model_cls._model_classes__):
        # this condition is an optimization of is_model_definition(cls)
        if isinstance(cls, models.MetaModel):
            for field in cls._field_definitions:
                definitions[field.name].append(field)
    for name, fields_ in definitions.items():
        if f'{model_cls._name}.{name}' in model_cls.pool._database_translated_fields:
            # the field is currently translated in the database; ensure the
            # field is translated to avoid converting its column to varchar
            # and losing data
            translate = next((
                field._args__['translate'] for field in reversed(fields_) if 'translate' in field._args__
            ), False)
            if not translate:
                field_translate = FIELD_TRANSLATE.get(
                    model_cls.pool._database_translated_fields[f'{model_cls._name}.{name}'],
                    True
                )
                # patch the field definition by adding an override
                _logger.debug("Patching %s.%s with translate=True", model_cls._name, name)
                fields_.append(type(fields_[0])(translate=field_translate))
        if f'{model_cls._name}.{name}' in model_cls.pool._database_company_dependent_fields:
            # the field is currently company dependent in the database; ensure
            # the field is company dependent to avoid converting its column to
            # the base data type
            company_dependent = next((
                field._args__['company_dependent'] for field in reversed(fields_) if 'company_dependent' in field._args__
            ), False)
            if not company_dependent:
                # validate column type again in case the column type is changed by upgrade script
                rows = env.execute_query(sql.SQL(
                    'SELECT data_type FROM information_schema.columns WHERE table_name = %s AND column_name = %s',
                    model_cls._table, name,
                ))
                if rows and rows[0][0] == 'jsonb':
                    # patch the field definition by adding an override
                    _logger.warning("Patching %s.%s with company_dependent=True", model_cls._name, name)
                    fields_.append(type(fields_[0])(company_dependent=True))
        if len(fields_) == 1 and fields_[0]._direct and fields_[0].model_name == model_cls._name:
            # single direct definition: share the field object as-is
            model_cls._fields__[name] = fields_[0]
        else:
            # overridden or patched field: rebuild it from its definitions
            Field = type(fields_[-1])
            add_field(model_cls, name, Field(_base_fields__=tuple(fields_)))

    # 2. add manual fields
    if model_cls.pool._init_modules:
        _add_manual_fields(model_cls, env)

    # 3. make sure that parent models determine their own fields, then add
    # inherited fields to model_cls
    _check_inherits(model_cls)
    for parent_name in model_cls._inherits:
        _setup(model_cls.pool[parent_name], env)
    _add_inherited_fields(model_cls)

    # 4. initialize more field metadata
    model_cls._setup_done__ = True
    for field in model_cls._fields.values():
        field.prepare_setup()

    # 5. determine and validate rec_name
    if model_cls._rec_name:
        assert model_cls._rec_name in model_cls._fields, \
            "Invalid _rec_name=%r for model %r" % (model_cls._rec_name, model_cls._name)
    elif 'name' in model_cls._fields:
        model_cls._rec_name = 'name'
    elif model_cls._custom and 'x_name' in model_cls._fields:
        model_cls._rec_name = 'x_name'

    # 6. determine and validate active_name
    if model_cls._active_name:
        assert (model_cls._active_name in model_cls._fields
                and model_cls._active_name in ('active', 'x_active')), \
            ("Invalid _active_name=%r for model %r; only 'active' and "
             "'x_active' are supported and the field must be present on "
             "the model") % (model_cls._active_name, model_cls._name)
    elif 'active' in model_cls._fields:
        model_cls._active_name = 'active'
    elif 'x_active' in model_cls._fields:
        model_cls._active_name = 'x_active'

    # 7. determine table objects
    assert not model_cls._table_object_definitions, "model_cls is a registry model"
    model_cls._table_objects = frozendict({
        cons.full_name(model_cls): cons
        for cls in reversed(model_cls._model_classes__)
        if isinstance(cls, models.MetaModel)
        for cons in cls._table_object_definitions
    })
def _check_inherits(model_cls: type[BaseModel]):
for comodel_name, field_name in model_cls._inherits.items():
field = model_cls._fields.get(field_name)
if not field or field.type != 'many2one':
raise TypeError(
f"Missing many2one field definition for _inherits reference {field_name!r} in model {model_cls._name!r}. "
f"Add a field like: {field_name} = fields.Many2one({comodel_name!r}, required=True, ondelete='cascade')"
)
if not (field.delegate and field.required and (field.ondelete or "").lower() in ("cascade", "restrict")):
raise TypeError(
f"Field definition for _inherits reference {field_name!r} in {model_cls._name!r} "
"must be marked as 'delegate', 'required' with ondelete='cascade' or 'restrict'"
)
def _add_inherited_fields(model_cls: type[BaseModel]):
    """ Determine inherited fields.

    For each ``_inherits`` parent, adds a related (delegated) field for every
    parent field that is not redefined on ``model_cls``.
    """
    if model_cls._abstract or not model_cls._inherits:
        return

    # determine which fields can be inherited
    to_inherit = {
        name: (parent_fname, field)
        for parent_model_name, parent_fname in model_cls._inherits.items()
        for name, field in model_cls.pool[parent_model_name]._fields.items()
    }

    # add inherited fields that are not redefined locally
    for name, (parent_fname, field) in to_inherit.items():
        if name not in model_cls._fields:
            # inherited fields are implemented as related fields, with the
            # following specific properties:
            # - reading inherited fields should not bypass access rights
            #   (related_sudo=False)
            # - copy inherited fields iff their original field is copied
            field_cls = type(field)
            add_field(model_cls, name, field_cls(
                inherited=True,
                inherited_field=field,
                related=f"{parent_fname}.{name}",
                related_sudo=False,
                copy=field.copy,
                readonly=field.readonly,
                export_string_translation=field.export_string_translation,
            ))
def _setup_fields(model_cls: type[BaseModel], env: Environment):
    """ Setup the fields, except for recomputation triggers.

    Manual fields whose setup fails are dropped from the model instead of
    breaking the registry; company-dependent many2one fields are indexed by
    comodel in the registry for later use.
    """
    bad_fields = []
    many2one_company_dependents = model_cls.pool.many2one_company_dependents
    # an empty recordset, only used as the field setup context
    model = model_cls(env, (), ())
    for name, field in model_cls._fields.items():
        try:
            field.setup(model)
        except Exception:
            if field.base_field.manual:
                # Something goes wrong when setup a manual field.
                # This can happen with related fields using another manual many2one field
                # that hasn't been loaded because the comodel does not exist yet.
                # This can also be a manual function field depending on not loaded fields yet.
                bad_fields.append(name)
                continue
            raise
        if field.type == 'many2one' and field.company_dependent:
            many2one_company_dependents.add(field.comodel_name, field)

    # drop the manual fields that failed to set up
    for name in bad_fields:
        pop_field(model_cls, name)
def _add_manual_models(env: Environment):
    """ Add extra models to the registry.

    Removes stale custom models from the registry, then loads the manual
    models stored in ``ir_model`` and registers a generated definition class
    for each of them.
    """
    # clean up registry first
    for name, model_cls in list(env.registry.items()):
        if model_cls._custom:
            del env.registry.models[name]
            # remove the model's name from its parents' _inherit_children
            for parent_cls in model_cls.__bases__:
                if hasattr(parent_cls, 'pool'):
                    parent_cls._inherit_children.discard(name)
    # we cannot use self._fields to determine translated fields, as it has not been set up yet
    env.cr.execute("SELECT *, name->>'en_US' AS name FROM ir_model WHERE state = 'manual'")
    for model_data in env.cr.dictfetchall():
        attrs = env['ir.model']._instanciate_attrs(model_data)
        # adapt _auto and _log_access if necessary
        table_name = model_data["model"].replace(".", "_")
        table_kind = sql.table_kind(env.cr, table_name)
        if table_kind not in (sql.TableKind.Regular, None):
            # backed by a view or foreign table: do not manage its schema
            _logger.info(
                "Model %r is backed by table %r which is not a regular table (%r), disabling automatic schema management",
                model_data["model"], table_name, table_kind,
            )
            attrs['_auto'] = False
        env.cr.execute(
            """ SELECT a.attname
                FROM pg_attribute a
                JOIN pg_class t ON a.attrelid = t.oid AND t.relname = %s
                WHERE a.attnum > 0 -- skip system columns """,
            [table_name]
        )
        columns = {colinfo[0] for colinfo in env.cr.fetchall()}
        # enable log access only when all audit columns exist in the table
        attrs['_log_access'] = set(models.LOG_ACCESS_COLUMNS) <= columns
        model_def = type('CustomDefinitionModel', (models.Model,), attrs)
        add_to_registry(env.registry, model_def)
def _add_manual_fields(model_cls: type[BaseModel], env: Environment):
    """ Add extra fields on model.

    Instantiates the manual fields stored in ``ir.model.fields`` for this
    model; a field that fails to load is skipped with a logged exception.
    """
    IrModelFields = env['ir.model.fields']
    fields_data = IrModelFields._get_manual_field_data(model_cls._name)
    for name, field_data in fields_data.items():
        # only add fields not already defined and in the 'manual' state
        if name not in model_cls._fields and field_data['state'] == 'manual':
            try:
                attrs = IrModelFields._instanciate_attrs(field_data)
                if attrs:
                    # look up the Field subclass matching the stored type
                    field = fields.Field._by_type__[field_data['ttype']](**attrs)
                    add_field(model_cls, name, field)
            except Exception:
                _logger.exception("Failed to load field %s.%s: skipped", model_cls._name, field_data['name'])
def add_field(model_cls: type[BaseModel], name: str, field: Field):
    """ Add the given ``field`` under the given ``name`` on the model class of the given ``model``. """
    # the name must match an existing field on the model or on one of its
    # _inherits parents, or be a custom field (prefixed with 'x_')
    candidate_models = [model_cls]
    candidate_models.extend(model_cls.pool[inherit] for inherit in model_cls._inherits)
    is_class_field = any(
        isinstance(getattr(model, name, None), fields.Field)
        for model in candidate_models
    )
    if not is_class_field and not name.startswith('x_'):
        raise ValidationError(  # pylint: disable=missing-gettext
            f"The field `{name}` is not defined in the `{model_cls._name}` Python class and does not start with 'x_'"
        )
    # only Field instances may be registered on a model
    if not isinstance(field, fields.Field):
        raise ValidationError("You can only add `fields.Field` objects to a model fields")  # pylint: disable=missing-gettext
    if not isinstance(getattr(model_cls, name, field), fields.Field):
        _logger.warning("In model %r, field %r overriding existing value", model_cls._name, name)
    setattr(model_cls, name, field)
    field._toplevel = True
    field.__set_name__(model_cls, name)
    # add field as an attribute and in model_cls._fields__ (for reflection)
    model_cls._fields__[name] = field
def pop_field(model_cls: type[BaseModel], name: str) -> Field | None:
    """ Remove the field with the given ``name`` from the model class of ``model``. """
    field = model_cls._fields__.pop(name, None)
    discardattr(model_cls, name)
    if model_cls._rec_name == name:
        # fixup _rec_name and display_name's dependencies
        model_cls._rec_name = None
        depends = model_cls.pool.field_depends
        if model_cls.display_name in depends:
            depends[model_cls.display_name] = tuple(
                dep for dep in depends[model_cls.display_name] if dep != name
            )
    return field

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,82 @@
import datetime
from odoo.tools import SQL, config, lazy_classproperty
from odoo.tools.constants import GC_UNLINK_LIMIT
from . import decorators as api
from .models import Model
class TransientModel(Model):
    """ Model super-class for transient records, meant to be temporarily
    persistent, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management, all users can
    create new records, and may only access the records they created. The
    superuser has unrestricted access to all TransientModel records.
    """
    _auto: bool = True  # automatically create database backend
    _register: bool = False  # not visible in ORM registry, meant to be python-inherited only
    _abstract = False  # not abstract
    _transient = True  # transient

    # default values for _transient_vacuum(); read lazily from the server
    # configuration so a deployment can tune them
    _transient_max_count = lazy_classproperty(lambda _: int(config.get('osv_memory_count_limit')))
    "maximum number of transient records, unlimited if ``0``"
    _transient_max_hours = lazy_classproperty(lambda _: float(config.get('transient_age_limit')))
    "maximum idle lifetime (in hours), unlimited if ``0``"

    @api.autovacuum
    def _transient_vacuum(self):
        """Clean the transient records.

        This unlinks old records from the transient model tables whenever the
        :attr:`_transient_max_count` or :attr:`_transient_max_hours` conditions
        (if any) are reached.

        Actual cleaning will happen only once every 5 minutes. This means this
        method can be called frequently (e.g. whenever a new record is created).

        Example with both max_hours and max_count active:

        Suppose max_hours = 0.2 (aka 12 minutes), max_count = 20, there are
        55 rows in the table, 10 created/changed in the last 5 minutes, an
        additional 12 created/changed between 5 and 10 minutes ago, the rest
        created/changed more than 12 minutes ago.

        - age based vacuum will leave the 22 rows created/changed in the last 12
          minutes
        - count based vacuum will wipe out another 12 rows. Not just 2,
          otherwise each addition would immediately cause the maximum to be
          reached again.
        - the 10 rows that have been created/changed the last 5 minutes will NOT
          be deleted
        """
        has_remaining = False
        if self._transient_max_hours:
            # Age-based expiration
            has_remaining |= self._transient_clean_rows_older_than(self._transient_max_hours * 60 * 60)
        if self._transient_max_count:
            # Count-based expiration
            has_remaining |= self._transient_clean_old_rows(self._transient_max_count)
        # This method is shared by all transient models; therefore, return the
        # model name to be logged and whether there are more rows to process
        return self._name, has_remaining

    def _transient_clean_old_rows(self, max_count: int) -> bool:
        """Vacuum old rows when the table holds more than ``max_count`` rows;
        return whether more rows may remain to clean."""
        # Check how many rows we have in the table
        self._cr.execute(SQL("SELECT count(*) FROM %s", SQL.identifier(self._table)))
        [count] = self._cr.fetchone()
        if count > max_count:
            # over the limit: delete everything idle for at least 5 minutes
            # (not just the excess), so each new record does not immediately
            # re-trigger a vacuum
            return self._transient_clean_rows_older_than(300)
        return False

    def _transient_clean_rows_older_than(self, seconds: int) -> bool:
        """Unlink records not written to for at least ``seconds``; return
        whether the deletion batch limit was reached (more rows may remain)."""
        # Never delete rows used in last 5 minutes
        seconds = max(seconds, 300)
        now = self.env.cr.now()
        domain = [('write_date', '<', now - datetime.timedelta(seconds=seconds))]
        records = self.sudo().search(domain, limit=GC_UNLINK_LIMIT)
        records.unlink()
        # hitting the batch limit suggests there are more rows to clean
        return len(records) == GC_UNLINK_LIMIT

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,205 @@
from __future__ import annotations
import typing
from odoo.tools import sql
if typing.TYPE_CHECKING:
from collections.abc import Callable
import psycopg2.extensions
from .environments import Environment
from .models import BaseModel
from .registry import Registry
ConstraintMessageType = (
str
| Callable[[Environment, psycopg2.extensions.Diagnostics | None], str]
)
IndexDefinitionType = (
str
| Callable[[Registry], str]
)
class TableObject:
    """ Declares a SQL object related to the model.

    The identifier of the SQL object will be "{model._table}_{name}".
    """
    name: str  # short name, unique within the model (set by __set_name__)
    message: ConstraintMessageType = ''  # user-facing violation message
    _module: str = ''  # module on whose definition class the object is declared

    def __init__(self):
        """Abstract SQL object"""
        # to avoid confusion: name is unique inside the model, full_name is in the database
        self.name = ''

    def __set_name__(self, owner, name):
        # database objects should be private members of the class:
        # first of all, you should not need to access them from any model
        # and this avoids having them in the middle of the fields when listing members
        assert name.startswith('_'), "Names of SQL objects in a model must start with '_'"
        assert not name.startswith(f"_{owner.__name__}__"), "Names of SQL objects must not be mangled"
        # strip the leading underscore to get the public short name
        self.name = name[1:]
        if getattr(owner, 'pool', None) is None:  # models.is_model_definition(owner)
            # only for fields on definition classes, not registry classes
            self._module = owner._module
            owner._table_object_definitions.append(self)

    def get_definition(self, registry: Registry) -> str:
        # subclasses return the SQL definition of the object
        raise NotImplementedError

    def full_name(self, model: BaseModel) -> str:
        # NOTE(review): ``self.definition`` is not defined on this class
        # (subclasses only expose get_definition); if this assert fires,
        # building its message would raise AttributeError -- confirm
        assert self.name, f"The table object is not named ({self.definition})"
        name = f"{model._table}_{self.name}"
        # truncate/normalize to a valid PostgreSQL identifier
        return sql.make_identifier(name)

    def get_error_message(self, model: BaseModel, diagnostics=None) -> str:
        """Build an error message for the object/constraint.

        :param model: Optional model on which the constraint is defined
        :param diagnostics: Optional diagnostics from the raised exception
        :return: Translated error for the user
        """
        message = self.message
        if callable(message):
            return message(model.env, diagnostics)
        return message

    def apply_to_database(self, model: BaseModel):
        # subclasses create or update the object in the database
        raise NotImplementedError

    def __str__(self) -> str:
        # NOTE(review): relies on ``self.definition``, which no class in this
        # file defines -- likely raises AttributeError; confirm a
        # ``definition`` property exists elsewhere or use get_definition()
        return f"({self.name!r}={self.definition!r}, {self.message!r})"
class Constraint(TableObject):
    """ SQL table constraint.

    The definition of the constraint is used to `ADD CONSTRAINT` on the table.
    """

    def __init__(
        self,
        definition: str,
        message: ConstraintMessageType = '',
    ) -> None:
        """ SQL table constraint.

        ``definition`` is the SQL fragment used to add the constraint.  When
        the constraint is violated, ``message`` is shown to the user; leave
        it empty to get a default message.

        Examples of constraint definitions:

        - CHECK (x > 0)
        - FOREIGN KEY (abc) REFERENCES some_table(id)
        - UNIQUE (user_id)
        """
        super().__init__()
        self._definition = definition
        if message:
            self.message = message

    def get_definition(self, registry: Registry):
        # the definition of a table constraint is static (registry unused)
        return self._definition

    def apply_to_database(self, model: BaseModel):
        """Create or update the constraint on the model's table, dropping any
        out-of-date version first; creation is deferred via post_constraint."""
        cr = model.env.cr
        constraint_name = self.full_name(model)
        wanted_definition = self.get_definition(model.pool)
        current_definition = sql.constraint_definition(cr, model._table, constraint_name)
        if current_definition == wanted_definition:
            # nothing to do: the database is already up to date
            return
        if current_definition:
            # constraint exists but its definition may have changed
            sql.drop_constraint(cr, model._table, constraint_name)
        model.pool.post_constraint(
            cr,
            lambda cr: sql.add_constraint(cr, model._table, constraint_name, wanted_definition),
            constraint_name,
        )
class Index(TableObject):
    """ Index on the table.

    ``CREATE INDEX ... ON model_table <your definition>``.
    """
    # overridden to True by UniqueIndex
    unique: bool = False

    def __init__(self, definition: IndexDefinitionType):
        """ Index in SQL.

        The name of the SQL object will be "{model._table}_{key}". The definition
        is the SQL that will be used to create the index; it may also be a
        callable taking the registry and returning the SQL.

        Example of definition:

        - (group_id, active) WHERE active IS TRUE
        - USING btree (group_id, user_id)
        """
        super().__init__()
        self._index_definition = definition

    def _resolve_definition(self, registry: Registry):
        """ Return the raw definition clause, resolving a callable definition. """
        definition = self._index_definition
        if callable(definition):
            definition = definition(registry)
        return definition

    def get_definition(self, registry: Registry):
        """ Return the full index definition, or '' when there is nothing to create. """
        definition = self._resolve_definition(registry)
        if not definition:
            return ''
        return f"{'UNIQUE ' if self.unique else ''}INDEX {definition}"

    def apply_to_database(self, model: BaseModel):
        """ Synchronize the index in the database with its declaration. """
        cr = model.env.cr
        conname = self.full_name(model)
        definition = self.get_definition(model.pool)
        db_definition, db_comment = sql.index_definition(cr, conname)
        if db_comment == definition or (not db_comment and db_definition):
            # keep when the definition matches the comment in the database
            # or if we have an index without a comment (this is used by support to tweak indexes)
            return
        if db_definition:
            # the index exists but its definition may have changed
            sql.drop_index(cr, conname, model._table)
        definition_clause = self._resolve_definition(model.pool)
        if not definition_clause:
            # don't create an index with an empty definition
            return
        model.pool.post_constraint(cr, lambda cr: sql.add_index(
            cr,
            conname,
            model._table,
            comment=definition,
            definition=definition_clause,
            unique=self.unique,
        ), conname)
class UniqueIndex(Index):
    """ Unique index on the table.

    ``CREATE UNIQUE INDEX ... ON model_table <your definition>``.
    """
    unique = True

    def __init__(self, definition: IndexDefinitionType, message: ConstraintMessageType = ''):
        """ Unique index in SQL.

        The name of the SQL object will be "{model._table}_{key}", and the
        definition is the SQL used to create the index. An optional ``message``
        is shown to the user when the uniqueness is violated.

        Example of definition:

        - (group_id, active) WHERE active IS TRUE
        - USING btree (group_id, user_id)
        """
        super().__init__(definition)
        if message:
            self.message = message

View file

@ -0,0 +1,27 @@
# ruff: noqa: E402, F401
import typing
from collections.abc import Mapping
from .commands import Command
from .domains import Domain
from .environments import Environment
from .fields import Field
from .identifiers import IdType, NewId
from .models import BaseModel
from .registry import Registry
# ``Self`` typing helper: prefer typing_extensions (needed for Python < 3.11),
# then the stdlib, and finally fall back to a plain TypeVar so annotations
# still resolve when neither is available.
try:
    # needed extensions for python<3.11
    from typing_extensions import Self
except ImportError:
    try:
        from typing import Self
    except ImportError:
        Self = typing.TypeVar("Self")  # type: ignore

# A search domain: a ``Domain`` object, or the list form mixing logical
# operator strings with ``(field, operator, value)`` 3-tuples.
DomainType = Domain | list[str | tuple[str, str, typing.Any]]
# An evaluation context: a read-only mapping of names to values.
ContextType = Mapping[str, typing.Any]
# Field values, as passed to ``create``/``write``: field name -> value.
ValuesType = dict[str, typing.Any]
# Generic type variable bound to models, for methods returning ``type(self)``.
ModelType = typing.TypeVar("ModelType", bound=BaseModel)
if typing.TYPE_CHECKING:
from .commands import CommandValue

View file

@ -0,0 +1,149 @@
import re
import warnings
from collections.abc import Set as AbstractSet
import dateutil.relativedelta
from odoo.exceptions import AccessError, ValidationError
from odoo.tools import SQL
# lowercase alphanumeric/underscore strings
regex_alphanumeric = re.compile(r'^[a-z0-9_]+$')
# model names: lowercase alphanumeric with underscores and dots
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
# PostgreSQL identifiers: letters, digits, '_' and '$', not starting with a digit
regex_pg_name = re.compile(r'^[a-z_][a-z0-9_$]*$', re.IGNORECASE)
# match private methods, to prevent their remote invocation
regex_private = re.compile(r'^(_.*|init)$')

# types handled as collections
COLLECTION_TYPES = (list, tuple, AbstractSet)

# The hard-coded super-user id (a.k.a. root user, or OdooBot).
SUPERUSER_ID = 1
# _read_group stuff
# relativedelta step corresponding to each supported date/datetime granularity
READ_GROUP_TIME_GRANULARITY = {
    'hour': dateutil.relativedelta.relativedelta(hours=1),
    'day': dateutil.relativedelta.relativedelta(days=1),
    'week': dateutil.relativedelta.relativedelta(days=7),
    'month': dateutil.relativedelta.relativedelta(months=1),
    'quarter': dateutil.relativedelta.relativedelta(months=3),
    'year': dateutil.relativedelta.relativedelta(years=1)
}
# numeric granularity suffixes mapped to date part names
# (presumably the PostgreSQL ``date_part``/``EXTRACT`` field names — confirm
# at the call site)
READ_GROUP_NUMBER_GRANULARITY = {
    'year_number': 'year',
    'quarter_number': 'quarter',
    'month_number': 'month',
    'iso_week_number': 'week',  # ISO week number because anything else than ISO is nonsense
    'day_of_year': 'doy',
    'day_of_month': 'day',
    'day_of_week': 'dow',
    'hour_number': 'hour',
    'minute_number': 'minute',
    'second_number': 'second',
}
# union of both granularity tables (time-based entries first)
READ_GROUP_ALL_TIME_GRANULARITY = READ_GROUP_TIME_GRANULARITY | READ_GROUP_NUMBER_GRANULARITY
# SQL operators with spaces around them
# hardcoded to avoid changing SQL injection linting
# maps domain operators to their pre-wrapped SQL counterparts, padded with
# spaces so they can be concatenated directly between two SQL expressions
SQL_OPERATORS = {
    "=": SQL(" = "),
    "!=": SQL(" != "),
    "in": SQL(" IN "),
    "not in": SQL(" NOT IN "),
    "<": SQL(" < "),
    ">": SQL(" > "),
    "<=": SQL(" <= "),
    ">=": SQL(" >= "),
    "like": SQL(" LIKE "),
    "ilike": SQL(" ILIKE "),
    "=like": SQL(" LIKE "),
    "=ilike": SQL(" ILIKE "),
    "not like": SQL(" NOT LIKE "),
    "not ilike": SQL(" NOT ILIKE "),
    "not =like": SQL(" NOT LIKE "),
    "not =ilike": SQL(" NOT ILIKE "),
}
def check_method_name(name):
    """ Raise an ``AccessError`` if ``name`` is a private method name.

    .. deprecated:: 19.0
        Use :func:`odoo.service.model.get_public_method` instead.
    """
    # stacklevel=2 so the DeprecationWarning is attributed to the caller,
    # not to this helper
    warnings.warn("Since 19.0, use odoo.service.model.get_public_method", DeprecationWarning, stacklevel=2)
    if regex_private.match(name):
        raise AccessError('Private methods (such as %s) cannot be called remotely.' % name)
def check_object_name(name):
    """ Return whether ``name`` is a valid model name.

    The _name attribute in osv and osv_memory object is subject to
    some restrictions. This function returns True or False whether
    the given name is allowed or not.

    TODO: this is an approximation. The goal in this approximation
    is to disallow uppercase characters (in some places, we quote
    table/column names and in other not, which leads to this kind
    of errors:

    psycopg2.ProgrammingError: relation "xxx" does not exist).

    The same restriction should apply to both osv and osv_memory
    objects for consistency.
    """
    return bool(regex_object_name.match(name))
def check_pg_name(name):
    """ Check whether the given name is a valid PostgreSQL identifier name.

    :raises ValidationError: when the name contains invalid characters or
        exceeds 63 characters
    """
    if regex_pg_name.match(name) is None:
        raise ValidationError("Invalid characters in table name %r" % name)
    if len(name) > 63:
        raise ValidationError("Table name %r is too long" % name)
def parse_field_expr(field_expr: str) -> tuple[str, str | None]:
    """ Split a field expression into ``(field_name, property_name)``.

    The expression is cut at the *first* dot: ``"a.b.c"`` gives
    ``("a", "b.c")`` and ``"a"`` gives ``("a", None)``.

    :raises ValueError: when the field part is empty (e.g. ``""`` or ``".x"``)
    """
    field_name, sep, property_name = field_expr.partition(".")
    if not field_name:
        # report the original expression, not the truncated field part
        raise ValueError(f"Invalid field expression {field_expr!r}")
    return field_name, (property_name if sep else None)
def expand_ids(id0, ids):
    """ Return an iterator of unique ids from the concatenation of ``[id0]`` and
    ``ids``, and of the same kind (all real or all new).
    """
    seen = {id0}
    real = bool(id0)  # the truthiness of id0 determines the kind to keep
    yield id0
    for other in ids:
        if bool(other) == real and other not in seen:
            seen.add(other)
            yield other
class OriginIds:
    """ A reversible iterable returning the origin ids of a collection of ``ids``.

    Actual ids are returned as is, and ids without origin are not returned.
    """
    __slots__ = ['ids']

    def __init__(self, ids):
        self.ids = ids

    @staticmethod
    def _origin(id_):
        # a truthy id passes through; a falsy one is replaced by its
        # ``origin`` attribute when present
        return id_ or getattr(id_, 'origin', None)

    def __iter__(self):
        # filter(None, ...) drops ids that resolved to a falsy origin
        return filter(None, map(self._origin, self.ids))

    def __reversed__(self):
        return filter(None, map(self._origin, reversed(self.ids)))
origin_ids = OriginIds  # lowercase alias for OriginIds