19.0 vanilla

This commit is contained in:
Ernad Husremovic 2026-03-09 09:30:27 +01:00
parent d1963a3c3a
commit 2d3ee4855a
7430 changed files with 2687981 additions and 2965473 deletions

View file

@ -62,7 +62,7 @@ import typing
import warnings
from datetime import date, datetime, time, timedelta, timezone
from odoo.exceptions import UserError
from odoo.exceptions import MissingError, UserError
from odoo.tools import SQL, OrderedSet, Query, classproperty, partition, str2bool
from odoo.tools.date_utils import parse_date, parse_iso_date
from .identifiers import NewId
@ -1566,7 +1566,7 @@ def _value_to_datetime(value, env, iso_only=False):
tz = None
elif (tz := env.tz) != pytz.utc:
# get the tzinfo (without LMT)
tz = tz.localize(datetime.now()).tzinfo
tz = tz.localize(datetime.combine(value, time.min)).tzinfo
else:
tz = None
value = datetime.combine(value, time.min, tz)
@ -1738,16 +1738,20 @@ def _operator_hierarchy(condition, model):
if isinstance(result, Domain):
if field.name == 'id':
return result
return DomainCondition(field.name, 'any', result)
return DomainCondition(field.name, 'any!', result)
return DomainCondition(field.name, 'in', result)
def _operator_child_of_domain(comodel: BaseModel, parent):
"""Return a set of ids or a domain to find all children of given model"""
if comodel._parent_store and parent == comodel._parent_name:
try:
paths = comodel.mapped('parent_path')
except MissingError:
paths = comodel.exists().mapped('parent_path')
domain = Domain.OR(
DomainCondition('parent_path', '=like', rec.parent_path + '%') # type: ignore
for rec in comodel
DomainCondition('parent_path', '=like', path + '%') # type: ignore
for path in paths
)
return domain
else:
@ -1766,16 +1770,24 @@ def _operator_parent_of_domain(comodel: BaseModel, parent):
"""Return a set of ids or a domain to find all parents of given model"""
parent_ids: OrderedSet[int]
if comodel._parent_store and parent == comodel._parent_name:
try:
paths = comodel.mapped('parent_path')
except MissingError:
paths = comodel.exists().mapped('parent_path')
parent_ids = OrderedSet(
int(label)
for rec in comodel
for label in rec.parent_path.split('/')[:-1] # type: ignore
for path in paths
for label in path.split('/')[:-1]
)
else:
# recursively retrieve all parent nodes with sudo() to avoid
# access rights errors; the filtering of forbidden records is
# done by the rest of the domain
parent_ids = OrderedSet()
try:
comodel.mapped(parent)
except MissingError:
comodel = comodel.exists()
while comodel:
parent_ids.update(comodel._ids)
comodel = comodel[parent].filtered(lambda p: p.id not in parent_ids)

View file

@ -584,7 +584,7 @@ class Transaction:
self.cache = Cache(self)
# temporary directories (managed in odoo.tools.file_open_temporary_directory)
self.__file_open_tmp_paths = () # type: ignore # noqa: PLE0237
self.__file_open_tmp_paths = [] # type: ignore # noqa: PLE0237
def flush(self) -> None:
""" Flush pending computations and updates in the transaction. """

View file

@ -17,7 +17,7 @@ from operator import attrgetter
from psycopg2.extras import Json as PsycopgJson
from odoo.exceptions import AccessError, MissingError
from odoo.tools import Query, SQL, sql
from odoo.tools import Query, SQL, reset_cached_properties, sql
from odoo.tools.constants import PREFETCH_MAX
from odoo.tools.misc import SENTINEL, ReadonlyDict, Sentinel, unique
@ -550,7 +550,8 @@ class Field(typing.Generic[T]):
warnings.warn(f'Property {self}.readonly should be a boolean ({self.readonly}).', stacklevel=1)
self._setup_done = True
# column_type might be changed during Field.setup
reset_cached_properties(self)
#
# Setup of non-related fields
#
@ -910,6 +911,9 @@ class Field(typing.Generic[T]):
def _description_sortable(self, env: Environment):
if self.column_type and self.store: # shortcut
return True
if self.inherited_field and self.inherited_field._description_sortable(env):
# avoid computation for inherited field
return True
model = env[self.model_name]
query = model._as_query(ordered=False)
@ -922,6 +926,9 @@ class Field(typing.Generic[T]):
def _description_groupable(self, env: Environment):
if self.column_type and self.store: # shortcut
return True
if self.inherited_field and self.inherited_field._description_groupable(env):
# avoid computation for inherited field
return True
model = env[self.model_name]
query = model._as_query(ordered=False)
@ -935,6 +942,9 @@ class Field(typing.Generic[T]):
def _description_aggregator(self, env: Environment):
if not self.aggregator or (self.column_type and self.store): # shortcut
return self.aggregator
if self.inherited_field and self.inherited_field._description_aggregator(env):
# avoid computation for inherited field
return self.inherited_field.aggregator
model = env[self.model_name]
query = model._as_query(ordered=False)
@ -1131,10 +1141,7 @@ class Field(typing.Generic[T]):
return
if column['udt_name'] == self.column_type[0]:
return
if column['is_nullable'] == 'NO':
sql.drop_not_null(model.env.cr, model._table, self.name)
self._convert_db_column(model, column)
column.clear() # remove information, because it may no longer be valid
def _convert_db_column(self, model: BaseModel, column: dict[str, typing.Any]):
""" Convert the given database column to the type of the field. """
@ -1441,7 +1448,7 @@ class Field(typing.Generic[T]):
yield '$'
# no need to match r'.*' in else because we only use .match()
like_regex = re.compile("".join(build_like_regex(unaccent(value), "=" in operator)))
like_regex = re.compile("".join(build_like_regex(unaccent(value), "=" in operator)), flags=re.DOTALL)
return lambda rec: like_regex.match(unaccent(getter(rec)))
# -------------------------------------------------

View file

@ -310,7 +310,14 @@ class Image(Binary):
self._update_cache(record, value, dirty=True)
def _image_process(self, value, env):
if self.readonly and not self.max_width and not self.max_height:
if self.readonly and (
(not self.max_width and not self.max_height)
or (
isinstance(self.related_field, Image)
and self.max_width == self.related_field.max_width
and self.max_height == self.related_field.max_height
)
):
# no need to process images for computed fields, or related fields
return value
try:

View file

@ -6,7 +6,7 @@ import typing
from psycopg2.extras import Json as PsycopgJson
from odoo.tools import SQL
from odoo.tools import SQL, json_default
from .fields import Field
from .identifiers import IdType
@ -54,8 +54,8 @@ class Boolean(Field[bool]):
class Json(Field):
""" JSON Field that contain unstructured information in jsonb PostgreSQL column.
This field is still in beta
Some features have not been implemented and won't be implemented in stable versions, including:
Some features won't be implemented, including:
* searching
* indexing
* mutating the values.
@ -71,10 +71,12 @@ class Json(Field):
def convert_to_cache(self, value, record, validate=True):
if not value:
return None
return json.loads(json.dumps(value))
return json.loads(json.dumps(value, ensure_ascii=False, default=json_default))
def convert_to_column(self, value, record, values=None, validate=True):
if not value:
if validate:
value = self.convert_to_cache(value, record)
if value is None:
return None
return PsycopgJson(value)

View file

@ -66,6 +66,14 @@ class Float(Field[float]):
:class:`~odoo.addons.base.models.decimal_precision.DecimalPrecision` record name.
:type digits: tuple(int,int) or str
:param min_display_digits: An int or a string referencing a
:class:`~odoo.addons.base.models.decimal_precision.DecimalPrecision` record name.
Represents the minimum number of decimal digits to display in the UI.
So if it's equal to 3:
- `3.1` will be shown as `'3.100'`.
- `3.1234` will be shown as `'3.1234'`.
:type min_display_digits: int or str
When a float is a quantity associated with an unit of measure, it is important
to use the right tool to compare or round values with the correct precision.
@ -99,11 +107,20 @@ class Float(Field[float]):
type = 'float'
_digits: str | tuple[int, int] | None = None # digits argument passed to class initializer
_min_display_digits: str | int | None = None
falsy_value = 0.0
aggregator = 'sum'
def __init__(self, string: str | Sentinel = SENTINEL, digits: str | tuple[int, int] | Sentinel | None = SENTINEL, **kwargs):
super().__init__(string=string, _digits=digits, **kwargs)
def __init__(
self,
string: str | Sentinel = SENTINEL,
digits: str | tuple[int, int] | typing.Literal[0, False] | Sentinel | None = SENTINEL,
min_display_digits: str | int | Sentinel | None = SENTINEL,
**kwargs,
):
if digits is SENTINEL and min_display_digits is not SENTINEL:
digits = False
super().__init__(string=string, _digits=digits, _min_display_digits=min_display_digits, **kwargs)
@property
def _column_type(self):
@ -122,11 +139,19 @@ class Float(Field[float]):
else:
return self._digits
def get_min_display_digits(self, env):
if isinstance(self._min_display_digits, str):
return env['decimal.precision'].precision_get(self._min_display_digits)
return self._min_display_digits
_related__digits = property(attrgetter('_digits'))
def _description_digits(self, env: Environment) -> tuple[int, int] | None:
return self.get_digits(env)
def _description_min_display_digits(self, env):
return self.get_min_display_digits(env)
def convert_to_column(self, value, record, values=None, validate=True):
value_float = value = float(value or 0.0)
if digits := self.get_digits(record.env):

View file

@ -642,7 +642,7 @@ class Properties(Field):
raise ValueError(f"Missing property name for {self}")
def get_property(record):
property_value = self.__get__(record)
property_value = self.__get__(record.with_context(property_selection_get_key=True))
value = property_value.get(property_name)
if value:
return value
@ -828,6 +828,8 @@ class Property(abc.Mapping):
return self.record.env[prop.get('comodel')].browse(prop.get('value'))
if prop.get('type') == 'selection' and prop.get('value'):
if self.record.env.context.get('property_selection_get_key'):
return next((sel[0] for sel in prop.get('selection') if sel[0] == prop['value']), False)
return next((sel[1] for sel in prop.get('selection') if sel[0] == prop['value']), False)
if prop.get('type') == 'tags' and prop.get('value'):

View file

@ -64,6 +64,12 @@ class _Relational(Field[BaseModel]):
# a lot of missing records, just fetch that field
remaining = records[len(vals):]
remaining.fetch([self.name])
# fetch does not raise MissingError, check value
if record_id not in field_cache:
raise MissingError("\n".join([
env._("Record does not exist or has been deleted."),
env._("(Record: %(record)s, User: %(user)s)", record=record_id, user=env.uid),
])) from None
else:
remaining = records.__class__(env, (record_id,), records._prefetch_ids)
super().__get__(remaining, owner)
@ -870,7 +876,8 @@ class One2many(_RelationalMulti):
# link self to its inverse field and vice-versa
comodel = model.env[self.comodel_name]
try:
comodel._fields[self.inverse_name]
field = comodel._fields[self.inverse_name]
field.setup(comodel)
except KeyError:
raise ValueError(f"{self.inverse_name!r} declared in {self!r} does not exist on {comodel._name!r}.")
@ -1358,10 +1365,14 @@ class Many2many(_RelationalMulti):
context.update(self.context)
comodel = records.env[self.comodel_name].with_context(**context)
# bypass the access during search if method is overwritten to avoid
# possibly filtering all records of the comodel before joining
filter_access = self.bypass_search_access and type(comodel)._search is not BaseModel._search
# make the query for the lines
domain = self.get_comodel_domain(records)
try:
query = comodel._search(domain, order=comodel._order)
query = comodel._search(domain, order=comodel._order, bypass_access=filter_access)
except AccessError as e:
raise AccessError(records.env._("Failed to read field %s", self) + '\n' + str(e)) from e
@ -1378,6 +1389,16 @@ class Many2many(_RelationalMulti):
for id1, id2 in records.env.execute_query(query.select(sql_id1, sql_id2)):
group[id1].append(id2)
# filter using record rules
if filter_access and group:
corecord_ids = OrderedSet(id_ for ids in group.values() for id_ in ids)
accessible_corecords = comodel.browse(corecord_ids)._filtered_access('read')
if len(accessible_corecords) < len(corecord_ids):
# some records are inaccessible, remove them from groups
corecord_ids = set(accessible_corecords._ids)
for id1, ids in group.items():
group[id1] = [id_ for id_ in ids if id_ in corecord_ids]
# store result in cache
values = [tuple(group[id_]) for id_ in records._ids]
self._insert_cache(records, values)

View file

@ -206,11 +206,8 @@ class Selection(Field[str | typing.Literal[False]]):
# force all values to be strings (check _get_year_selection)
return [(str(key), str(label)) for key, label in selection]
# translate selection labels
if env.lang:
return env['ir.model.fields'].get_field_selection(self.model_name, self.name)
else:
return selection
translations = dict(env['ir.model.fields'].get_field_selection(self.model_name, self.name))
return [(key, translations.get(key, label)) for key, label in selection]
def _default_group_expand(self, records, groups, domain):
# return a group per selection option, in definition order

View file

@ -129,16 +129,20 @@ class BaseString(Field[str | typing.Literal[False]]):
):
base_lang = record._get_base_lang()
lang = record.env.lang or 'en_US'
delay_translation = value != record.with_context(edit_translations=None, check_translations=None, lang=lang)[self.name]
if lang != base_lang:
base_value = record.with_context(edit_translations=None, check_translations=True, lang=base_lang)[self.name]
base_terms_iter = iter(self.get_trans_terms(base_value))
get_base = lambda term: next(base_terms_iter)
base_terms = self.get_trans_terms(base_value)
translated_terms = self.get_trans_terms(value) if value != base_value else base_terms
if len(base_terms) != len(translated_terms):
# term number mismatch, ignore all translations
value = base_value
translated_terms = base_terms
get_base = dict(zip(translated_terms, base_terms)).__getitem__
else:
get_base = lambda term: term
delay_translation = value != record.with_context(edit_translations=None, check_translations=None, lang=lang)[self.name]
# use a wrapper to let the frontend js code identify each term and
# its metadata in the 'edit_translations' context
def translate_func(term):

View file

@ -400,7 +400,8 @@ def _setup(model_cls: type[BaseModel], env: Environment):
if not company_dependent:
# validate column type again in case the column type is changed by upgrade script
rows = env.execute_query(sql.SQL(
'SELECT data_type FROM information_schema.columns WHERE table_name = %s AND column_name = %s',
'SELECT data_type FROM information_schema.columns'
' WHERE table_name = %s AND column_name = %s AND table_schema = current_schema',
model_cls._table, name,
))
if rows and rows[0][0] == 'jsonb':
@ -560,7 +561,8 @@ def _add_manual_models(env: Environment):
""" SELECT a.attname
FROM pg_attribute a
JOIN pg_class t ON a.attrelid = t.oid AND t.relname = %s
WHERE a.attnum > 0 -- skip system columns """,
WHERE a.attnum > 0 -- skip system columns
AND t.relnamespace = current_schema::regnamespace """,
[table_name]
)
columns = {colinfo[0] for colinfo in env.cr.fetchall()}
@ -594,7 +596,7 @@ def add_field(model_cls: type[BaseModel], name: str, field: Field):
isinstance(getattr(model, name, None), fields.Field)
for model in [model_cls] + [model_cls.pool[inherit] for inherit in model_cls._inherits]
)
if not (is_class_field or name.startswith('x_')):
if not (is_class_field or model_cls.pool['ir.model.fields']._is_manual_name(None, name)):
raise ValidationError( # pylint: disable=missing-gettext
f"The field `{name}` is not defined in the `{model_cls._name}` Python class and does not start with 'x_'"
)

View file

@ -3119,6 +3119,7 @@ class BaseModel(metaclass=MetaModel):
""" SELECT a.attname, a.attnotnull
FROM pg_class c, pg_attribute a
WHERE c.relname=%s
AND c.relnamespace = current_schema::regnamespace
AND c.oid=a.attrelid
AND a.attisdropped=%s
AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')
@ -3409,6 +3410,8 @@ class BaseModel(metaclass=MetaModel):
if self.env.user._has_group('base.group_no_one'):
if field.groups == NO_ACCESS:
allowed_groups_msg = _("always forbidden")
elif not field.groups:
allowed_groups_msg = _("custom field access rules")
else:
groups_list = [self.env.ref(g) for g in field.groups.split(',')]
groups = self.env['res.groups'].union(*groups_list).sorted('id')
@ -3648,7 +3651,7 @@ class BaseModel(metaclass=MetaModel):
for lang, _translations in translations.items():
_old_translations = {src: values[lang] for src, values in old_translation_dictionary.items() if lang in values}
_new_translations = {**_old_translations, **_translations}
new_values[lang] = field.translate(_new_translations.get, old_source_lang_value)
new_values[lang] = field.convert_to_cache(field.translate(_new_translations.get, old_source_lang_value), self)
field._update_cache(self.with_context(prefetch_langs=True), new_values, dirty=True)
# the following write is in charge of
@ -4149,7 +4152,7 @@ class BaseModel(metaclass=MetaModel):
if any(self._ids):
Rule = self.env['ir.rule']
domain = Rule._compute_domain(self._name, operation)
if domain and (forbidden := self - self.sudo().filtered_domain(domain)):
if domain and (forbidden := self - self.sudo().with_context(active_test=False).filtered_domain(domain)):
return forbidden, functools.partial(Rule._make_access_error, operation, forbidden)
return None
@ -5378,7 +5381,10 @@ class BaseModel(metaclass=MetaModel):
# add order and limits
if order:
query.order = self._order_to_sql(order, query)
if limit is not None:
# In RPC, None is not available; False is used instead to mean "no limit"
# Note: True is kept for backward-compatibility (treated as 1)
if limit is not None and limit is not False:
query.limit = limit
if offset is not None:
query.offset = offset
@ -7115,6 +7121,7 @@ def get_columns_from_sql_diagnostics(cr, diagnostics, *, check_registry=False) -
JOIN pg_class t ON t.oid = conrelid
WHERE conname = %s
AND t.relname = %s
AND t.relnamespace = current_schema::regnamespace
""", diagnostics.constraint_name, diagnostics.table_name))
columns = cr.fetchone()
return columns[0] if columns else []

View file

@ -58,7 +58,7 @@ _REGISTRY_CACHES = {
'routing': 1024, # 2 entries per website
'routing.rewrites': 8192, # url_rewrite entries
'templates.cached_values': 2048, # arbitrary
'groups': 8, # see res.groups
'groups': 64, # see res.groups
}
# cache invalidation dependencies, as follows:
@ -135,6 +135,7 @@ class Registry(Mapping[str, type["BaseModel"]]):
upgrade_modules: Collection[str] = (),
reinit_modules: Collection[str] = (),
new_db_demo: bool | None = None,
models_to_check: set[str] | None = None,
) -> Registry:
"""Create and return a new registry for the given database name.
@ -172,6 +173,19 @@ class Registry(Mapping[str, type["BaseModel"]]):
cls.registries[db_name] = registry # pylint: disable=unsupported-assignment-operation
try:
registry.setup_signaling()
with registry.cursor() as cr:
# This transaction defines a critical section for multi-worker concurrency control.
# When the transaction commits, the first worker proceeds to upgrade modules. Other workers
# encounter a serialization error and retry, finding no upgrade marker in the database.
# This significantly reduces the likelihood of concurrent module upgrades across workers.
# NOTE: This block is intentionally outside the try-except below to prevent workers that fail
# due to serialization errors from calling `reset_modules_state` while the first worker is
# actively upgrading modules.
from odoo.modules import db # noqa: PLC0415
if db.is_initialized(cr):
cr.execute("DELETE FROM ir_config_parameter WHERE key='base.partially_updated_database'")
if cr.rowcount:
update_module = True
# This should be a method on Registry
from odoo.modules.loading import load_modules, reset_modules_state # noqa: PLC0415
exit_stack = ExitStack()
@ -189,6 +203,7 @@ class Registry(Mapping[str, type["BaseModel"]]):
install_modules=install_modules,
reinit_modules=reinit_modules,
new_db_demo=new_db_demo,
models_to_check=models_to_check,
)
except Exception:
reset_modules_state(db_name)
@ -686,6 +701,8 @@ class Registry(Mapping[str, type["BaseModel"]]):
# not already marked as "to be applied".
with cr.savepoint(flush=False):
func(cr)
else:
self._constraint_queue[key] = func
except Exception as e:
if self._is_install:
_schema.error(*e.args)
@ -767,8 +784,7 @@ class Registry(Mapping[str, type["BaseModel"]]):
SELECT c.relname, a.attname
FROM pg_attribute a
JOIN pg_class c ON a.attrelid = c.oid
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = 'public'
WHERE c.relnamespace = current_schema::regnamespace
AND a.attnotnull = true
AND a.attnum > 0
AND a.attname != 'id';
@ -803,7 +819,8 @@ class Registry(Mapping[str, type["BaseModel"]]):
return
# retrieve existing indexes with their corresponding table
cr.execute("SELECT indexname, tablename FROM pg_indexes WHERE indexname IN %s",
cr.execute("SELECT indexname, tablename FROM pg_indexes WHERE indexname IN %s"
" AND schemaname = current_schema",
[tuple(row[0] for row in expected)])
existing = dict(cr.fetchall())
@ -883,6 +900,7 @@ class Registry(Mapping[str, type["BaseModel"]]):
JOIN pg_attribute AS a1 ON a1.attrelid = c1.oid AND fk.conkey[1] = a1.attnum
JOIN pg_attribute AS a2 ON a2.attrelid = c2.oid AND fk.confkey[1] = a2.attnum
WHERE fk.contype = 'f' AND c1.relname IN %s
AND c1.relnamespace = current_schema::regnamespace
"""
cr.execute(query, [tuple({table for table, column in self._foreign_keys})])
existing = {
@ -969,10 +987,9 @@ class Registry(Mapping[str, type["BaseModel"]]):
query = """
SELECT c.relname
FROM pg_class c
JOIN pg_namespace n ON (n.oid = c.relnamespace)
WHERE c.relname IN %s
AND c.relkind = 'r'
AND n.nspname = 'public'
AND c.relnamespace = current_schema::regnamespace
"""
tables = tuple(m._table for m in self.models.values())
cr.execute(query, [tables])
@ -1006,7 +1023,8 @@ class Registry(Mapping[str, type["BaseModel"]]):
# The `orm_signaling_...` sequences indicates when caches must
# be invalidated (i.e. cleared).
signaling_tables = tuple(f'orm_signaling_{cache_name}' for cache_name in ['registry', *_CACHES_BY_KEY])
cr.execute("SELECT table_name FROM information_schema.tables WHERE table_name IN %s", [signaling_tables])
cr.execute("SELECT table_name FROM information_schema.tables"
" WHERE table_name IN %s AND table_schema = current_schema", [signaling_tables])
existing_sig_tables = tuple(s[0] for s in cr.fetchall()) # could be a set but not efficient with such a little list
# signaling was previously using sequence but this doesn't work with replication