19.0 vanilla

This commit is contained in:
Ernad Husremovic 2026-03-09 09:30:27 +01:00
parent d1963a3c3a
commit 2d3ee4855a
7430 changed files with 2687981 additions and 2965473 deletions

View file

@ -75,11 +75,11 @@ class UserAgentParser(object):
)
def __init__(self):
self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
self.browsers = [
self.platforms = tuple((b, re.compile(a, re.I)) for a, b in self.platforms)
self.browsers = tuple(
(b, re.compile(self._browser_version_re % a, re.I))
for a, b in self.browsers
]
)
def __call__(self, user_agent):
for platform, regex in self.platforms: # noqa: B007

View file

@ -234,6 +234,8 @@ class configmanager:
help="install demo data in new databases")
group.add_option("--without-demo", dest="with_demo", type='without_demo', metavar='BOOL', nargs='?', const=True,
help="don't install demo data in new databases (default)")
group.add_option("--skip-auto-install", dest="skip_auto_install", action="store_true", my_default=False,
help="skip the automatic installation of modules marked as auto_install")
group.add_option("-P", "--import-partial", dest="import_partial", type='path', my_default='', file_loadable=False,
help="Use this for big data importation, if it crashes you will be able to continue at the current state. Provide a filename to store intermediate importation states.")
group.add_option("--pidfile", dest="pidfile", type='path', my_default='',
@ -282,7 +284,7 @@ class configmanager:
help="Launch a python test file.")
group.add_option("--test-enable", dest='test_enable', action="store_true", file_loadable=False,
help="Enable unit tests. Implies --stop-after-init")
group.add_option("--test-tags", dest="test_tags", file_loadable=False,
group.add_option("-t", "--test-tags", dest="test_tags", file_loadable=False,
help="Comma-separated list of specs to filter which tests to execute. Enable unit tests if set. "
"A filter spec has the format: [-][tag][/module][:class][.method][[params]] "
"The '-' specifies if we want to include or exclude tests matching this spec. "

View file

@ -510,10 +510,16 @@ form: module.record_id""" % (xml_id,)
record.append(Field(name='website_id', ref=el.get('website_id')))
if 'key' in el.attrib:
record.append(Field(el.get('key'), name='key'))
# If the "active" value is set on the root node (instead of an inner
# <field>), it is treated as the value for the "active" field but only
# when *not updating*. This allows updating the record in a more recent
# version without changing its active state (compatibility).
if el.get('active') in ("True", "False"):
view_id = self.id_get(tpl_id, raise_if_not_found=False)
if self.mode != "update" or not view_id:
record.append(Field(name='active', eval=el.get('active')))
if el.get('customize_show') in ("True", "False"):
record.append(Field(name='customize_show', eval=el.get('customize_show')))
groups = el.attrib.pop('groups', None)
@ -536,6 +542,46 @@ form: module.record_id""" % (xml_id,)
return self._tag_record(record)
def _tag_asset(self, el):
"""
Transforms an <asset> element into a <record> and forwards it.
"""
asset_id = el.get('id')
Field = builder.E.field
record = etree.Element('record', attrib={
'id': asset_id,
'model': 'theme.ir.asset' if self.module.startswith('theme_') else 'ir.asset',
})
name = el.get('name', asset_id)
record.append(Field(name, name='name'))
# E.g. <bundle directive="prepend">web.assets_frontend</bundle>
# (directive is optional)
bundle_el = el.find('bundle')
record.append(Field(bundle_el.text, name='bundle'))
if 'directive' in bundle_el.attrib:
record.append(Field(bundle_el.get('directive'), name='directive'))
# E.g. <path>website/static/src/snippets/s_share/000.scss</path>
record.append(Field(el.find('path').text, name='path'))
# Same as <template> for ir.ui.view:
# If the "active" value is set on the root node (instead of an inner
# <field>), it is treated as the value for the "active" field but only
# when *not updating*. This allows updating the record in a more recent
# version without changing its active state (compatibility).
if el.get('active') in ("True", "False"):
record_id = self.id_get(asset_id, raise_if_not_found=False)
if self.mode != "update" or not record_id:
record.append(Field(name='active', eval=el.get('active')))
for child in el.iterchildren('field'):
record.append(child)
return self._tag_record(record)
def id_get(self, id_str, raise_if_not_found=True):
id_str = self.make_xml_id(id_str)
if id_str in self.idref:
@ -607,6 +653,7 @@ form: module.record_id""" % (xml_id,)
'function': self._tag_function,
'menuitem': self._tag_menuitem,
'template': self._tag_template,
'asset': self._tag_asset,
**dict.fromkeys(self.DATA_ROOTS, self._tag_root)
}

View file

@ -13,10 +13,11 @@ There is also one permanent generation that is never collected (see
The GC is triggered by the number of created objects. For the first collection,
at every allocation and deallocation, a counter is respectively increased and
decreased. Once it reaches a threshold, that collection is automatically
collected. Other thresolds indicate that every X collections, the next
collection is collected.
Default thresolds are 700, 10, 10.
collected.
Before 3.14, other thresholds indicate that every X collections, the next
collection is collected. Since then, there is only one additional collection
which is collected incrementally; `1 / threshold1` percent of the heap is
collected.
"""
import contextlib
import gc
@ -39,6 +40,7 @@ def _timing_gc_callback(event, info):
gen = info['generation']
if event == 'start':
_gc_start = _gc_time()
# python 3.14; gen2 is only collected when calling gc.collect() manually
if gen == 2 and _logger.isEnabledFor(logging.DEBUG):
_logger.debug("info %s, starting collection of gen2", gc_info())
else:

View file

@ -66,7 +66,10 @@ def format_list(
# Some styles could be unavailable for the chosen locale
if style not in locale.list_patterns:
style = "standard"
return lists.format_list([str(el) for el in lst], style, locale)
try:
return lists.format_list([str(el) for el in lst], style, locale)
except KeyError:
return lists.format_list([str(el) for el in lst], 'standard', locale)
def py_to_js_locale(locale: str) -> str:

View file

@ -86,7 +86,7 @@ class Intervals(typing.Generic[T]):
# using 'self' and 'other' below forces normalization
bounds1 = _boundaries(self, 'start', 'stop')
bounds2 = _boundaries(other, 'switch', 'switch')
bounds2 = _boundaries(Intervals(other, keep_distinct=self._keep_distinct), 'switch', 'switch')
start = None # set by start/stop
recs1 = None # set by start

View file

@ -659,7 +659,7 @@ def convert_unnamed_relative_import(content):
// after
require("some/path")
"""
repl = r"require(\g<path>)"
repl = r"\g<space>require(\g<path>)"
return IMPORT_UNNAMED_RELATIVE_RE.sub(repl, content)

View file

@ -80,7 +80,8 @@ safe_attrs = defs.safe_attrs | frozenset(
'data-ai-field', 'data-ai-record-id',
'data-heading-link-id',
'data-mimetype-before-conversion',
'data-language-id', 'data-syntax-highlighting-value'
'data-language-id',
'data-bs-toggle', # support nav-tabs
])
SANITIZE_TAGS = {
# allow new semantic HTML5 tags
@ -386,6 +387,8 @@ def html_normalize(src, filter_callback=None, output_method="html"):
for el in doc.iter(tag=etree.Element):
tag_quote(el)
doc = html.fromstring(html.tostring(doc, method=output_method))
if filter_callback:
doc = filter_callback(doc)

View file

@ -16,6 +16,7 @@ __all__ = ['guess_mimetype']
_logger = logging.getLogger(__name__)
_logger_guess_mimetype = _logger.getChild('guess_mimetype')
MIMETYPE_HEAD_SIZE = 2048
# We define our own guess_mimetype implementation and if magic is available we
# use it instead.
@ -150,11 +151,13 @@ _mime_mappings = (
# zip, but will include jar, odt, ods, odp, docx, xlsx, pptx, apk
_Entry('application/zip', [b'PK\x03\x04'], [_check_ooxml, _check_open_container_format]),
)
def _odoo_guess_mimetype(bin_data, default='application/octet-stream'):
def _odoo_guess_mimetype(bin_data: bytes, default='application/octet-stream'):
""" Attempts to guess the mime type of the provided binary data, similar
to but significantly more limited than libmagic
:param str bin_data: binary data to try and guess a mime type for
:param bin_data: binary data to try and guess a mime type for
:returns: matched mimetype or ``application/octet-stream`` if none matched
"""
# by default, guess the type using the magic number of file hex signature (like magic, but more limited)
@ -188,8 +191,10 @@ try:
import magic
def guess_mimetype(bin_data, default=None):
if isinstance(bin_data, bytearray):
bin_data = bytes(bin_data[:1024])
mimetype = magic.from_buffer(bin_data[:1024], mime=True)
bin_data = bytes(bin_data[:MIMETYPE_HEAD_SIZE])
elif not isinstance(bin_data, bytes):
raise TypeError('`bin_data` must be bytes or bytearray')
mimetype = magic.from_buffer(bin_data[:MIMETYPE_HEAD_SIZE], mime=True)
if mimetype in ('application/CDFV2', 'application/x-ole-storage'):
# Those are the generic file format that Microsoft Office
# was using before 2006, use our own check to further

View file

@ -29,7 +29,7 @@ from collections import defaultdict
from collections.abc import Iterable, Iterator, Mapping, MutableMapping, MutableSet, Reversible
from contextlib import ContextDecorator, contextmanager
from difflib import HtmlDiff
from functools import reduce, wraps
from functools import lru_cache, reduce, wraps
from itertools import islice, groupby as itergroupby
from operator import itemgetter
@ -229,7 +229,7 @@ def file_path(file_path: str, filter_ext: tuple[str, ...] = ('',), env: Environm
addons_paths = list(map(os.path.dirname, module.__path__))
else:
root_path = os.path.abspath(config.root_path)
temporary_paths = env.transaction._Transaction__file_open_tmp_paths if env else ()
temporary_paths = env.transaction._Transaction__file_open_tmp_paths if env else []
addons_paths = [*odoo.addons.__path__, root_path, *temporary_paths]
for addons_dir in addons_paths:
@ -305,13 +305,12 @@ def file_open_temporary_directory(env: Environment):
:param env: environment for which the temporary directory is created.
:return: the absolute path to the created temporary directory
"""
assert not env.transaction._Transaction__file_open_tmp_paths, 'Reentrancy is not implemented for this method'
with tempfile.TemporaryDirectory() as module_dir:
try:
env.transaction._Transaction__file_open_tmp_paths = (module_dir,)
env.transaction._Transaction__file_open_tmp_paths.append(module_dir)
yield module_dir
finally:
env.transaction._Transaction__file_open_tmp_paths = ()
env.transaction._Transaction__file_open_tmp_paths.remove(module_dir)
#----------------------------------------------------------
@ -814,7 +813,9 @@ class lower_logging(logging.Handler):
record.levelname = f'_{record.levelname}'
record.levelno = self.to_level
self.had_error_log = True
record.args = tuple(arg.replace('Traceback (most recent call last):', '_Traceback_ (most recent call last):') if isinstance(arg, str) else arg for arg in record.args)
if MungedTracebackLogRecord.__base__ is logging.LogRecord:
MungedTracebackLogRecord.__bases__ = (record.__class__,)
record.__class__ = MungedTracebackLogRecord
if logging.getLogger(record.name).isEnabledFor(record.levelno):
for handler in self.old_handlers:
@ -822,6 +823,14 @@ class lower_logging(logging.Handler):
handler.emit(record)
class MungedTracebackLogRecord(logging.LogRecord):
def getMessage(self):
return super().getMessage().replace(
'Traceback (most recent call last):',
'_Traceback_ (most recent call last):',
)
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-u', '--update', '-i', '--init', '--i18n-overwrite']))
@ -1161,6 +1170,9 @@ class Callbacks:
self._funcs.clear()
self.data.clear()
def __len__(self) -> int:
return len(self._funcs)
class ReversedIterable(Reversible[T], typing.Generic[T]):
""" An iterable implementing the reversal of another iterable. """
@ -1304,6 +1316,7 @@ def get_lang(env: Environment, lang_code: str | None = None) -> LangData:
return env['res.lang']._get_data(code=lang)
@lru_cache
def babel_locale_parse(lang_code: str | None) -> babel.Locale:
if lang_code:
try:

View file

@ -59,6 +59,7 @@ def zip_dir(path, stream, include_dir=True, fnct_sort=None): # TODO add ign
if len_prefix:
len_prefix += 1
dir_root_path = os.path.realpath(path)
with zipfile.ZipFile(stream, 'w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zipf:
for dirpath, _dirnames, filenames in os.walk(path):
filenames = sorted(filenames, key=fnct_sort)
@ -66,9 +67,10 @@ def zip_dir(path, stream, include_dir=True, fnct_sort=None): # TODO add ign
bname, ext = os.path.splitext(fname)
ext = ext or bname
if ext not in ['.pyc', '.pyo', '.swp', '.DS_Store']:
path = os.path.normpath(os.path.join(dirpath, fname))
if os.path.isfile(path):
zipf.write(path, path[len_prefix:])
fpath = os.path.normpath(os.path.join(dirpath, fname))
real_fpath = os.path.realpath(fpath)
if os.path.isfile(real_fpath) and os.path.commonpath([dir_root_path, real_fpath]) == dir_root_path:
zipf.write(real_fpath, fpath[len_prefix:])
if os.name != 'nt':

View file

@ -108,6 +108,16 @@ def _unwrapping_get(self, key, default=None):
DictionaryObject.get = _unwrapping_get
if hasattr(NameObject, 'renumber_table'):
# Make sure all the correct delimiters are included
# We will make this change only if pypdf has the renumber_table attribute
# https://github.com/py-pdf/pypdf/commit/8c542f331828c5839fda48442d89b8ac5d3984ac
NameObject.renumber_table.update({
**{chr(i): f"#{i:02X}".encode() for i in b"#()<>[]{}/%"},
**{chr(i): f"#{i:02X}".encode() for i in range(33)},
})
if hasattr(PdfWriter, 'write_stream'):
# >= 2.x has a utility `write` which can open a path, so `write_stream` could be called directly
class BrandedFileWriter(PdfWriter):
@ -207,16 +217,21 @@ def rotate_pdf(pdf):
return _buffer.getvalue()
def to_pdf_stream(attachment) -> io.BytesIO:
def to_pdf_stream(attachment) -> io.BytesIO | None:
"""Get the byte stream of the attachment as a PDF."""
if not attachment.raw:
_logger.warning("%s has no raw data.", attachment)
return None
if attachment_raw := attachment._get_pdf_raw():
return io.BytesIO(attachment_raw)
stream = io.BytesIO(attachment.raw)
if attachment.mimetype == 'application/pdf':
return stream
elif attachment.mimetype.startswith('image'):
if attachment.mimetype.startswith('image'):
output_stream = io.BytesIO()
Image.open(stream).convert("RGB").save(output_stream, format="pdf")
return output_stream
_logger.warning("mimetype (%s) not recognized for %s", attachment.mimetype, attachment)
return None
def extract_page(attachment, num_page=0) -> io.BytesIO | None:
@ -383,8 +398,8 @@ class OdooPdfFileWriter(PdfFileWriter):
adapted_subtype = subtype
if REGEX_SUBTYPE_UNFORMATED.match(subtype):
# _pypdf2_2 does the formating when creating a NameObject
if SUBMOD == '._pypdf2_2':
# _pypdf2_2 and _pypdf do the formatting when creating a NameObject
if SUBMOD in ('._pypdf2_2', '._pypdf'):
return '/' + subtype
adapted_subtype = '/' + subtype.replace('/', '#2F')
@ -488,16 +503,18 @@ class OdooPdfFileWriter(PdfFileWriter):
"""
# Set the PDF version to 1.7 (as PDF/A-3 is based on version 1.7) and make it PDF/A compliant.
# See https://github.com/veraPDF/veraPDF-validation-profiles/wiki/PDFA-Parts-2-and-3-rules#rule-612-1
self._header = b"%PDF-1.7"
# " The file header shall begin at byte zero and shall consist of "%PDF-1.n" followed by a single EOL marker,
# where 'n' is a single digit number between 0 (30h) and 7 (37h) "
# " The aforementioned EOL marker shall be immediately followed by a % (25h) character followed by at least four
# bytes, each of whose encoded byte values shall have a decimal value greater than 127 "
self._header = b"%PDF-1.7"
if SUBMOD != '._pypdf2_2':
self._header += b"\n"
# bytes, each of whose encoded byte values shall have a decimal value greater than 127 ".
# PyPDF2 2.X+ already adds these 4 characters by default (so ._pypdf2_2 and ._pypdf don't need it).
# The injected character `\xc3\xa9` is equivalent to the character `é`.
# Therefore, on `_pypdf2_1`, the header will look like: `%PDF-1.7\n%éééé`,
# while on `_pypdf2_2` and `_pypdf`, it will look like: `%PDF-1.7\n%âãÏÓ`.
if SUBMOD == '._pypdf2_1':
self._header += b"%\xDE\xAD\xBE\xEF"
self._header += b"\n%\xc3\xa9\xc3\xa9\xc3\xa9\xc3\xa9"
# Add a document ID to the trailer. This is only needed when using encryption with regular PDF, but is required
# when using PDF/A
@ -571,6 +588,14 @@ class OdooPdfFileWriter(PdfFileWriter):
outlines = self._root_object['/Outlines'].getObject()
outlines[NameObject('/Count')] = NumberObject(1)
# [6.7.2.2-1] include a MarkInfo dictionary containing "Marked" with true value
mark_info = DictionaryObject({NameObject("/Marked"): BooleanObject(True)})
self._root_object[NameObject("/MarkInfo")] = mark_info
# [6.7.3.3-1] include minimal document structure in the catalog
struct_tree_root = DictionaryObject({NameObject("/Type"): NameObject("/StructTreeRoot")})
self._root_object[NameObject("/StructTreeRoot")] = struct_tree_root
# Set odoo as producer
self.addMetadata({
'/Creator': "Odoo",
@ -618,7 +643,7 @@ class OdooPdfFileWriter(PdfFileWriter):
DictionaryObject({
NameObject('/CheckSum'): createStringObject(md5(attachment['content']).hexdigest()),
NameObject('/ModDate'): createStringObject(datetime.now().strftime(DEFAULT_PDF_DATETIME_FORMAT)),
NameObject('/Size'): NameObject(f"/{len(attachment['content'])}"),
NameObject('/Size'): NumberObject(len(attachment['content'])),
}),
})
if attachment.get('subtype'):

View file

@ -124,6 +124,7 @@ class PopulateContext:
SELECT indexname AS name, indexdef AS definition
FROM pg_indexes
WHERE tablename = %s
AND schemaname = current_schema
AND indexname NOT LIKE %s
AND indexdef NOT LIKE %s
""", model._table, '%pkey', '%UNIQUE%'))
@ -182,6 +183,7 @@ def field_needs_variation(model: Model, field: Field) -> bool:
JOIN pg_attribute a ON a.attnum = ANY (idx.indkey) AND a.attrelid = t.oid
WHERE t.relname = %s -- tablename
AND a.attname = %s -- column
AND t.relnamespace = current_schema::regnamespace
AND idx.indisunique = TRUE) AS is_unique;
""", model_._table, field_.name)
return model_.env.execute_query(query)[0][0]

View file

@ -129,9 +129,10 @@ class Collector:
def progress(self, entry=None, frame=None):
""" Checks if the limits were met and add to the entries"""
if self.profiler.entry_count_limit \
and self.profiler.entry_count() >= self.profiler.entry_count_limit:
and self.profiler.counter >= self.profiler.entry_count_limit:
self.profiler.end()
return
self.profiler.counter += 1
self.add(entry=entry,frame=frame)
def _get_stack_trace(self, frame=None):
@ -149,8 +150,10 @@ class Collector:
""" Return the entries of the collector after postprocessing. """
if not self._processed:
self.post_process()
self.processed_entries = self._entries
self._entries = None # avoid modification after processing
self._processed = True
return self._entries
return self.processed_entries
def summary(self):
return f"{'='*10} {self.name} {'='*10} \n Entries: {len(self._entries)}"
@ -203,6 +206,7 @@ class _BasePeriodicCollector(Collector):
self.frame_interval = interval or self._default_interval
self.__thread = threading.Thread(target=self.run)
self.last_frame = None
self._stop_event = threading.Event()
def start(self):
interval = self.profiler.params.get(f'{self.name}_interval')
@ -219,13 +223,14 @@ class _BasePeriodicCollector(Collector):
self.last_time = real_time()
while self.active: # maybe add a check on parent_thread state?
self.progress()
time.sleep(self.frame_interval)
self._entries.append({'stack': [], 'start': real_time()}) # add final end frame
self._stop_event.wait(self.frame_interval)
def stop(self):
self.active = False
self.__thread.join()
self._stop_event.set()
self._entries.append({'stack': [], 'start': real_time()}) # add final end frame
if self.__thread.is_alive() and self.__thread is not threading.current_thread():
self.__thread.join()
self.profiler.init_thread.profile_hooks.remove(self.progress)
@ -279,9 +284,9 @@ class MemoryCollector(_BasePeriodicCollector):
})
def stop(self):
super().stop()
_lock.release()
tracemalloc.stop()
super().stop()
def post_process(self):
for i, entry in enumerate(self._entries):
@ -552,6 +557,7 @@ class Profiler:
self.entry_count_limit = int(self.params.get("entry_count_limit",0)) # the limit could be set using a smarter way
self.done = False
self.exit_stack = ExitStack()
self.counter = 0
if db is ...:
# determine database from current thread

View file

@ -206,10 +206,9 @@ def existing_tables(cr, tablenames):
cr.execute(SQL("""
SELECT c.relname
FROM pg_class c
JOIN pg_namespace n ON (n.oid = c.relnamespace)
WHERE c.relname IN %s
AND c.relkind IN ('r', 'v', 'm')
AND n.nspname = current_schema
AND c.relnamespace = current_schema::regnamespace
""", tuple(tablenames)))
return [row[0] for row in cr.fetchall()]
@ -236,9 +235,8 @@ def table_kind(cr, tablename: str) -> TableKind | None:
cr.execute(SQL("""
SELECT c.relkind, c.relpersistence
FROM pg_class c
JOIN pg_namespace n ON (n.oid = c.relnamespace)
WHERE c.relname = %s
AND n.nspname = current_schema
AND c.relnamespace = current_schema::regnamespace
""", tablename))
if not cr.rowcount:
return None
@ -307,7 +305,8 @@ def table_columns(cr, tablename):
# might prevent a postgres user to read this field.
cr.execute(SQL(
''' SELECT column_name, udt_name, character_maximum_length, is_nullable
FROM information_schema.columns WHERE table_name=%s ''',
FROM information_schema.columns WHERE table_name=%s
AND table_schema = current_schema ''',
tablename,
))
return {row['column_name']: row for row in cr.dictfetchall()}
@ -317,7 +316,8 @@ def column_exists(cr, tablename, columnname):
""" Return whether the given column exists. """
cr.execute(SQL(
""" SELECT 1 FROM information_schema.columns
WHERE table_name=%s AND column_name=%s """,
WHERE table_name=%s AND column_name=%s
AND table_schema = current_schema """,
tablename, columnname,
))
return cr.rowcount
@ -408,6 +408,7 @@ def get_depending_views(cr, table, column):
JOIN pg_attribute ON pg_depend.refobjid = pg_attribute.attrelid
AND pg_depend.refobjsubid = pg_attribute.attnum
WHERE dependent.relname = %s
AND dependent.relnamespace = current_schema::regnamespace
AND pg_attribute.attnum > 0
AND pg_attribute.attname = %s
AND dependee.relkind in ('v', 'm')
@ -442,6 +443,7 @@ def constraint_definition(cr, tablename, constraintname):
JOIN pg_class t ON t.oid = c.conrelid
LEFT JOIN pg_description d ON c.oid = d.objoid
WHERE t.relname = %s AND conname = %s
AND t.relnamespace = current_schema::regnamespace
""", tablename, constraintname))
return cr.fetchone()[0] if cr.rowcount else None
@ -497,6 +499,7 @@ def get_foreign_keys(cr, tablename1, columnname1, tablename2, columnname2, ondel
AND a1.attname = %s
AND c2.relname = %s
AND a2.attname = %s
AND c1.relnamespace = current_schema::regnamespace
AND fk.confdeltype = %s
""",
tablename1, columnname1, tablename2, columnname2, deltype,
@ -518,7 +521,8 @@ def fix_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondele
AND array_lower(con.conkey, 1)=1 AND con.conkey[1]=a1.attnum
AND array_lower(con.confkey, 1)=1 AND con.confkey[1]=a2.attnum
AND a1.attrelid=c1.oid AND a2.attrelid=c2.oid
AND c1.relname=%s AND a1.attname=%s """,
AND c1.relname=%s AND a1.attname=%s
AND c1.relnamespace = current_schema::regnamespace """,
tablename1, columnname1,
))
found = False
@ -535,7 +539,8 @@ def fix_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondele
def index_exists(cr, indexname):
""" Return whether the given index exists. """
cr.execute(SQL("SELECT 1 FROM pg_indexes WHERE indexname=%s", indexname))
cr.execute(SQL("SELECT 1 FROM pg_indexes WHERE indexname=%s"
" AND schemaname = current_schema", indexname))
return cr.rowcount
@ -551,6 +556,7 @@ def index_definition(cr, indexname):
JOIN pg_indexes idx ON c.relname = idx.indexname
LEFT JOIN pg_description d ON c.oid = d.objoid
WHERE c.relname = %s AND c.relkind = 'i'
AND c.relnamespace = current_schema::regnamespace
""", indexname))
return cr.fetchone() if cr.rowcount else (None, None)

View file

@ -83,6 +83,8 @@ def locate_node(arch, spec):
"""
if spec.tag == 'xpath':
expr = spec.get('expr')
if expr is None:
raise ValidationError(_lt("Missing 'expr' attribute in xpath specification"))
try:
xPath = etree.ETXPath(expr)
except etree.XPathSyntaxError as e:

View file

@ -74,7 +74,7 @@ TRANSLATED_ELEMENTS = {
TRANSLATED_ATTRS = {
'string', 'add-label', 'help', 'sum', 'avg', 'confirm', 'placeholder', 'alt', 'title', 'aria-label',
'aria-keyshortcuts', 'aria-placeholder', 'aria-roledescription', 'aria-valuetext',
'value_label', 'data-tooltip', 'label', 'confirm-label', 'cancel-label',
'value_label', 'data-tooltip', 'label', 'confirm-label', 'confirm-title', 'cancel-label',
}
TRANSLATED_ATTRS.update({f't-attf-{attr}' for attr in TRANSLATED_ATTRS})
@ -86,10 +86,12 @@ FIELD_TRANSLATE = {
}
def is_translatable_attrib(key):
def is_translatable_attrib(key, node):
if not key:
return False
return key in TRANSLATED_ATTRS or key.endswith('.translate')
if 't-call' not in node.attrib and key in TRANSLATED_ATTRS:
return True
return key.endswith('.translate')
def is_translatable_attrib_value(node):
# check if the value attribute of a node must be translated
@ -148,44 +150,54 @@ def translate_xml_node(node, callback, parse, serialize):
""" Return whether ``text`` is a string with non-space characters. """
return bool(text) and not space_pattern.fullmatch(text)
def translatable(node):
def is_force_inline(node):
""" Return whether ``node`` is marked as it should be translated as
one term.
"""
return "o_translate_inline" in node.attrib.get("class", "").split()
def translatable(node, force_inline=False):
""" Return whether the given node can be translated as a whole. """
# Some specific nodes (e.g., text highlights) have an auto-updated DOM
# structure that makes them impossible to translate.
# The introduction of a translation `<span>` in the middle of their
# hierarchy breaks their functionalities. We need to force them to be
# translated as a whole using the `o_translate_inline` class.
force_inline = force_inline or is_force_inline(node)
return (
# Some specific nodes (e.g., text highlights) have an auto-updated
# DOM structure that makes them impossible to translate.
# The introduction of a translation `<span>` in the middle of their
# hierarchy breaks their functionalities. We need to force them to
# be translated as a whole using the `o_translate_inline` class.
"o_translate_inline" in node.attrib.get("class", "").split()
or node.tag in TRANSLATED_ELEMENTS
and not any(key.startswith("t-") or key.endswith(".translate") for key in node.attrib)
and all(translatable(child) for child in node)
(force_inline or node.tag in TRANSLATED_ELEMENTS)
# Nodes with directives are not translatable. Directives usually
# start with `t-`, but this prefix is optional for `groups` (see
# `_compile_directive_groups` which reads `t-groups` and `groups`)
and not any(key.startswith("t-") or key == 'groups' or key.endswith(".translate") for key in node.attrib)
and all(translatable(child, force_inline) for child in node)
)
def hastext(node, pos=0):
def hastext(node, pos=0, force_inline=False):
""" Return whether the given node contains some text to translate at the
given child node position. The text may be before the child node,
inside it, or after it.
"""
force_inline = force_inline or is_force_inline(node)
return (
# there is some text before node[pos]
nonspace(node[pos-1].tail if pos else node.text)
or (
pos < len(node)
and translatable(node[pos])
and translatable(node[pos], force_inline)
and (
any( # attribute to translate
val and (
is_translatable_attrib(key) or
is_translatable_attrib(key, node) or
(key == 'value' and is_translatable_attrib_value(node[pos])) or
(key == 'text' and is_translatable_attrib_text(node[pos]))
)
for key, val in node[pos].attrib.items()
)
# node[pos] contains some text to translate
or hastext(node[pos])
or hastext(node[pos], 0, force_inline)
# node[pos] has no text, but there is some text after it
or hastext(node, pos + 1)
or hastext(node, pos + 1, force_inline)
)
)
)
@ -196,7 +208,7 @@ def translate_xml_node(node, callback, parse, serialize):
isinstance(node, SKIPPED_ELEMENT_TYPES)
or node.tag in SKIPPED_ELEMENTS
or node.get('t-translation', "").strip() == "off"
or node.tag == 'attribute' and node.get('name') not in ('value', 'text') and not is_translatable_attrib(node.get('name'))
or node.tag == 'attribute' and node.get('name') not in ('value', 'text') and not is_translatable_attrib(node.get('name'), node)
or node.getparent() is None and avoid_pattern.match(node.text or "")
):
return
@ -209,7 +221,7 @@ def translate_xml_node(node, callback, parse, serialize):
# into a <div> element
div = etree.Element('div')
div.text = (node[pos-1].tail if pos else node.text) or ''
while pos < len(node) and translatable(node[pos]):
while pos < len(node) and translatable(node[pos], is_force_inline(node)):
div.append(node[pos])
# translate the content of the <div> element as a whole
@ -246,7 +258,7 @@ def translate_xml_node(node, callback, parse, serialize):
for key, val in node.attrib.items():
if nonspace(val):
if (
is_translatable_attrib(key) or
is_translatable_attrib(key, node) or
(key == 'value' and is_translatable_attrib_value(node)) or
(key == 'text' and is_translatable_attrib_text(node))
):
@ -421,10 +433,10 @@ def get_translation(module: str, lang: str, source: str, args: tuple | dict) ->
args = {k: v._translate(lang) if isinstance(v, LazyGettext) else v for k, v in args.items()}
else:
args = tuple(v._translate(lang) if isinstance(v, LazyGettext) else v for v in args)
if any(isinstance(a, Iterable) and not isinstance(a, str) for a in (args.values() if args_is_dict else args)):
if any(isinstance(a, Iterable) and not isinstance(a, (str, bytes)) for a in (args.values() if args_is_dict else args)):
# automatically format list-like arguments in a localized way
def process_translation_arg(v):
return format_list(env=None, lst=v, lang_code=lang) if isinstance(v, Iterable) and not isinstance(v, str) else v
return format_list(env=None, lst=v, lang_code=lang) if isinstance(v, Iterable) and not isinstance(v, (str, bytes)) else v
if args_is_dict:
args = {k: process_translation_arg(v) for k, v in args.items()}
else:
@ -1069,7 +1081,7 @@ def _extract_translatable_qweb_terms(element, callback):
if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
if (el.tag.lower() not in SKIPPED_ELEMENTS
and "t-js" not in el.attrib
and not (el.tag == 'attribute' and not is_translatable_attrib(el.get('name')))
and not (el.tag == 'attribute' and not is_translatable_attrib(el.get('name'), el))
and el.get("t-translation", '').strip() != "off"):
_push(callback, el.text, el.sourceline)
@ -1459,7 +1471,7 @@ class TranslationModuleReader(TranslationReader):
"""
# Also scan these non-addon paths
for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
for bin_path in ['orm', 'osv', 'report', 'modules', 'service', 'tools']:
self._path_list.append((os.path.join(config.root_path, bin_path), True))
# non-recursive scan for individual files in root directory but without
# scanning subdirectories that may contain addons
@ -1656,15 +1668,20 @@ class TranslationImporter:
translations.update({k: v for k, v in translation_dictionary[term_en].items() if v != term_en})
translation_dictionary[term_en] = translations
changed_values = {}
for lang in langs:
# translate and confirm model_terms translations
values[lang] = field.translate(lambda term: translation_dictionary.get(term, {}).get(lang), _value_en)
values.pop(f'_{lang}', None)
params.extend((id_, Json(values)))
new_val = field.translate(lambda term: translation_dictionary.get(term, {}).get(lang), _value_en)
if values.get(lang, None) != new_val:
changed_values[lang] = new_val
if f'_{lang}' in values:
changed_values[f'_{lang}'] = None
if changed_values:
params.extend((id_, Json(changed_values)))
if params:
env.cr.execute(f"""
UPDATE "{model_table}" AS m
SET "{field_name}" = t.value
SET "{field_name}" = jsonb_strip_nulls("{field_name}" || t.value)
FROM (
VALUES {', '.join(['(%s, %s::jsonb)'] * (len(params) // 2))}
) AS t(id, value)

View file

@ -1,6 +1,5 @@
import re
import urllib.parse
from urllib.parse import _WHATWG_C0_CONTROL_OR_SPACE
__all__ = ['urljoin']
@ -63,7 +62,7 @@ def urljoin(base: str, extra: str) -> str:
if e_path:
# prevent urljoin("/", "\\example.com/") to resolve as absolute to "//example.com/" in a browser redirect
# https://github.com/mozilla-firefox/firefox/blob/5e81b64f4ed88b610eb332e103744d68ee8b6c0d/netwerk/base/nsStandardURL.cpp#L2386-L2388
e_path = e_path.lstrip('/\\' + _WHATWG_C0_CONTROL_OR_SPACE)
e_path = e_path.lstrip('/\\\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ')
path = f'{path}/{e_path}'
# normalize: foo//bar -> foo/bar