19.0 vanilla

This commit is contained in:
Ernad Husremovic 2025-10-03 18:07:25 +02:00
parent 0a7ae8db93
commit 991d2234ca
416 changed files with 646602 additions and 300844 deletions

View file

@ -7,6 +7,7 @@ from . import test_barcode
from . import test_base
from . import test_basecase
from . import test_cache
from . import test_intervals
from . import test_date_utils
from . import test_deprecation
from . import test_db_cursor
@ -19,7 +20,9 @@ from . import test_groups
from . import test_http_case
from . import test_i18n
from . import test_image
from . import test_install
from . import test_avatar_mixin
from . import test_init
from . import test_ir_actions
from . import test_ir_attachment
from . import test_ir_cron
@ -38,10 +41,11 @@ from . import test_menu
from . import test_mimetypes
from . import test_misc
from . import test_module
from . import test_module_graph
from . import test_orm
from . import test_ormcache
from . import test_osv
from . import test_overrides
from . import test_query
from . import test_qweb_field
from . import test_qweb
from . import test_res_config
@ -52,10 +56,8 @@ from . import test_sql
from . import test_translate
from . import test_tz
# from . import test_uninstall # loop
from . import test_upgrade_code
from . import test_user_has_group
from . import test_views
from . import test_xmlrpc
from . import test_res_company
from . import test_res_currency
from . import test_res_country
@ -77,3 +79,6 @@ from . import test_config_parameter
from . import test_ir_module_category
from . import test_configmanager
from . import test_num2words_ar
from . import test_cli
from . import test_signature
from . import test_import_files

View file

@ -32,6 +32,8 @@ class BaseCommon(TransactionCase):
if independent_user:
cls.env = cls.env(user=independent_user)
cls.user = cls.env.user
else:
cls.env.user.group_ids += cls.get_default_groups()
independent_company = cls.setup_independent_company()
if independent_company:
@ -50,9 +52,9 @@ class BaseCommon(TransactionCase):
'name': 'Test Partner',
})
cls.group_portal = cls.env.ref('base.group_portal')
cls.group_user = cls.env.ref('base.group_user')
cls.group_system = cls.env.ref('base.group_system')
cls.group_portal = cls.quick_ref('base.group_portal')
cls.group_user = cls.quick_ref('base.group_user')
cls.group_system = cls.quick_ref('base.group_system')
@classmethod
def default_env_context(cls):
@ -62,7 +64,7 @@ class BaseCommon(TransactionCase):
@classmethod
def setup_other_currency(cls, code, **kwargs):
rates = kwargs.pop('rates', [])
currency = cls.env['res.currency'].with_context(active_test=False).search([('name', '=', code)], limit=1)
currency = cls._enable_currency(code)
currency.rate_ids.unlink()
currency.write({
'active': True,
@ -96,7 +98,8 @@ class BaseCommon(TransactionCase):
@classmethod
def _enable_currency(cls, currency_code):
currency = cls.env['res.currency'].with_context(active_test=False).search(
[('name', '=', currency_code.upper())]
[('name', '=', currency_code.upper())],
limit=1,
)
currency.action_unarchive()
return currency
@ -145,15 +148,11 @@ class BaseCommon(TransactionCase):
**({'login': 'portal_user'} | kwargs),
)
class BaseUsersCommon(BaseCommon):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user_portal = cls._create_new_portal_user()
cls.user_internal = cls._create_new_internal_user()
def quick_ref(cls, xmlid):
"""Find the matching record, without an existence check."""
model, id = cls.env['ir.model.data']._xmlid_to_res_model_res_id(xmlid)
return cls.env[model].browse(id)
class TransactionCaseWithUserDemo(TransactionCase):
@ -176,7 +175,7 @@ class TransactionCaseWithUserDemo(TransactionCase):
'login': 'demo',
'password': 'demo',
'partner_id': cls.partner_demo.id,
'groups_id': [Command.set([cls.env.ref('base.group_user').id, cls.env.ref('base.group_partner_manager').id])],
'group_ids': [Command.set([cls.env.ref('base.group_user').id, cls.env.ref('base.group_partner_manager').id])],
})
@ -202,7 +201,7 @@ class HttpCaseWithUserDemo(HttpCase):
'login': 'demo',
'password': 'demo',
'partner_id': cls.partner_demo.id,
'groups_id': [Command.set([cls.env.ref('base.group_user').id, cls.env.ref('base.group_partner_manager').id])],
'group_ids': [Command.set([cls.env.ref('base.group_user').id, cls.env.ref('base.group_partner_manager').id])],
})
@ -225,7 +224,7 @@ class SavepointCaseWithUserDemo(TransactionCase):
'login': 'demo',
'password': 'demo',
'partner_id': cls.partner_demo.id,
'groups_id': [Command.set([cls.env.ref('base.group_user').id, cls.env.ref('base.group_partner_manager').id])],
'group_ids': [Command.set([cls.env.ref('base.group_user').id, cls.env.ref('base.group_partner_manager').id])],
})
@classmethod
@ -350,7 +349,7 @@ class TransactionCaseWithUserPortal(TransactionCase):
'login': 'portal',
'password': 'portal',
'partner_id': cls.partner_portal.id,
'groups_id': [Command.set([cls.env.ref('base.group_portal').id])],
'group_ids': [Command.set([cls.env.ref('base.group_portal').id])],
})
@ -372,7 +371,7 @@ class HttpCaseWithUserPortal(HttpCase):
'login': 'portal',
'password': 'portal',
'partner_id': cls.partner_portal.id,
'groups_id': [Command.set([cls.env.ref('base.group_portal').id])],
'group_ids': [Command.set([cls.env.ref('base.group_portal').id])],
})
@ -410,6 +409,7 @@ class MockSmtplibCase:
'smtp_from': smtp_from,
'smtp_to_list': smtp_to_list,
'from_filter': self.from_filter,
})
def set_debuglevel(self, smtp_debug):
@ -427,7 +427,7 @@ class MockSmtplibCase:
self.testing_smtp_session = TestingSMTPSession()
IrMailServer = self.env['ir.mail_server']
connect_origin = type(IrMailServer).connect
connect_origin = type(IrMailServer)._connect__
find_mail_server_origin = type(IrMailServer)._find_mail_server
# custom mock to avoid losing context
@ -443,8 +443,8 @@ class MockSmtplibCase:
with patch('smtplib.SMTP_SSL', side_effect=lambda *args, **kwargs: self.testing_smtp_session), \
patch('smtplib.SMTP', side_effect=lambda *args, **kwargs: self.testing_smtp_session), \
patch.object(modules.module, 'current_test', False), \
patch.object(type(IrMailServer), 'connect', mock_function(connect_origin)) as connect_mocked, \
patch.object(type(IrMailServer), '_disable_send', lambda _: False), \
patch.object(type(IrMailServer), '_connect__', mock_function(connect_origin)) as connect_mocked, \
patch.object(type(IrMailServer), '_find_mail_server', mock_function(find_mail_server_origin)) as find_mail_server_mocked:
self.connect_mocked = connect_mocked.mock
self.find_mail_server_mocked = find_mail_server_mocked.mock
@ -453,7 +453,7 @@ class MockSmtplibCase:
def _build_email(self, mail_from, return_path=None, **kwargs):
headers = {'Return-Path': return_path} if return_path else {}
headers.update(**kwargs.pop('headers', {}))
return self.env['ir.mail_server'].build_email(
return self.env['ir.mail_server']._build_email__(
mail_from,
kwargs.pop('email_to', 'dest@example-é.com'),
kwargs.pop('subject', 'subject'),
@ -463,8 +463,9 @@ class MockSmtplibCase:
)
def _send_email(self, msg, smtp_session):
with patch.object(modules.module, 'current_test', False):
self.env['ir.mail_server'].send_email(msg, smtp_session=smtp_session)
IrMailServer = self.env['ir.mail_server']
with patch.object(type(IrMailServer), '_disable_send', lambda _: False):
IrMailServer.send_email(msg, smtp_session=smtp_session)
return smtp_session.messages.pop()
def assertSMTPEmailsSent(self, smtp_from=None, smtp_to_list=None,
@ -483,8 +484,8 @@ class MockSmtplibCase:
:param from_filter: from_filter of the <ir.mail_server> used to send the
email. False means 'match everything';
:param emails_count: the number of emails which should match the condition
:param msg_cc: optional check msg_cc value of email;
:param msg_to: optional check msg_to value of email;
:param msg_cc_lst: optional check msg_cc value of email;
:param msg_to_lst: optional check msg_to value of email;
:return: True if at least one email has been found with those parameters
"""
@ -509,7 +510,7 @@ class MockSmtplibCase:
if matching_emails_count != emails_count:
debug_info = '\n'.join(
f"SMTP-From: {email['smtp_from']}, SMTP-To: {email['smtp_to_list']}, "
f"Msg-From: {email['msg_from']}, From_filter: {email['from_filter']})"
f"Msg-From: {email['msg_from']}, Msg-To: {email['msg_to']}, From_filter: {email['from_filter']})"
for email in self.emails
)
self.assertEqual(

View file

@ -1,4 +1,9 @@
# This config was generated using --save in Odoo 16.0 and is used to make sure
# that an older config can load fine with a newer version.
[options]
demo = {}
geoip_database = /usr/share/GeoIP/GeoLite2-City.mmdb
osv_memory_age_limit = False
admin_passwd = admin
csv_internal_sep = ,
db_host = False
@ -10,10 +15,8 @@ db_sslmode = prefer
db_template = template0
db_user = False
dbfilter =
demo = {}
email_from = False
from_filter = False
geoip_database = /usr/share/GeoIP/GeoLite2-City.mmdb
gevent_port = 8072
http_enable = True
http_interface =
@ -32,7 +35,6 @@ log_handler = :INFO
log_level = info
logfile =
max_cron_threads = 2
osv_memory_age_limit = False
osv_memory_count_limit = 0
pg_path =
pidfile =

View file

@ -1,6 +1,7 @@
--init stock,hr
--update account,website
--without-demo rigolo
--reinit account
--with-demo
--import-partial /tmp/import-partial
--pidfile /tmp/pidfile
--load base,mail
@ -51,18 +52,13 @@
--db-template backup1706
--db_replica_host db2.localhost
--db_replica_port 2038
--db_app_name=myapp{pid}
--load-language fr_FR
--language fr_FR
--i18n-export /tmp/translate_out.csv
--i18n-import /tmp/translate_in.csv
--i18n-overwrite
--modules stock,hr,mail
--no-database-list
--dev xml,reload
--shell-interface ipython
--stop-after-init
--osv-memory-count-limit 71
--transient-age-limit 4

View file

@ -0,0 +1,69 @@
ODOO_WITH_DEMO=true
ODOO_PIDFILE=/tmp/pidfile
ODOO_DATA_DIR=/tmp/data-dir
ODOO_SERVER_WIDE_MODULES=base,mail
# test int as boolean
ODOO_HTTP_ENABLE=0
ODOO_HTTP_INTERFACE=10.0.0.254
ODOO_HTTP_PORT=6942
ODOO_GEVENT_PORT=8012
ODOO_PROXY_MODE=1
ODOO_X_SENDFILE=1
ODOO_DBFILTER=.*
ODOO_SCREENCASTS=/tmp/screencasts
ODOO_SCREENSHOTS=/tmp/screenshots
ODOO_LOGFILE=/tmp/odoo.log
ODOO_LOG_HANDLER=odoo.tools.config:DEBUG,:WARNING
# only handler is supported because we use base names on dest variable
ODOO_LOG_DB=logdb
ODOO_LOG_DB_LEVEL=debug
ODOO_LOG_LEVEL=debug
ODOO_EMAIL_FROM=admin@example.com
ODOO_FROM_FILTER=.*
ODOO_SMTP_SERVER=smtp.localhost
ODOO_SMTP_PORT=1299
ODOO_SMTP_SSL=True
ODOO_SMTP_USER=spongebob
ODOO_SMTP_PASSWORD=Tigrou0072
ODOO_SMTP_SSL_CERTIFICATE_FILENAME=/tmp/tlscert
ODOO_SMTP_SSL_PRIVATE_KEY_FILENAME=/tmp/tlskey
# use postgres environment variables
PGDATABASE=horizon
PGUSER=kiwi
PGPASSWORD=Tigrou0073
PGPATH=/tmp/pg_path
PGHOST=db.localhost
PGPORT=4269
PGSSLMODE=verify-full
PGDATABASE_TEMPLATE=backup1706
PGHOST_REPLICA=db2.localhost
PGPORT_REPLICA=2038
PGAPPNAME=envapp
ODOO_DB_MAXCONN=42
ODOO_DB_MAXCONN_GEVENT=100
ODOO_LIST_DB=false
ODOO_DEV=xml,reload
ODOO_OSV_MEMORY_COUNT_LIMIT=71
ODOO_TRANSIENT_AGE_LIMIT=4
ODOO_MAX_CRON_THREADS=4
ODOO_UNACCENT=true
ODOO_GEOIP_CITY_DB=/tmp/city.db
ODOO_GEOIP_COUNTRY_DB=/tmp/country.db
ODOO_WORKERS=92
ODOO_LIMIT_MEMORY_SOFT=1048576
ODOO_LIMIT_MEMORY_SOFT_GEVENT=1048577
ODOO_LIMIT_MEMORY_HARD=1048578
ODOO_LIMIT_MEMORY_HARD_GEVENT=1048579
ODOO_LIMIT_TIME_CPU=60
ODOO_LIMIT_TIME_REAL=61
ODOO_LIMIT_TIME_REAL_CRON=62
ODOO_LIMIT_REQUEST=100

View file

@ -0,0 +1,2 @@
[options]
db_name = db1,db2

View file

@ -4,7 +4,6 @@ admin_passwd = Tigrou007
csv_internal_sep = @
publisher_warranty_url = http://example.com
reportgz = True
root_path = /tmp/root_path
websocket_rate_limit_burst = 1
websocket_rate_limit_delay = 2
websocket_keep_alive_timeout = 600
@ -14,8 +13,7 @@ config = /tmp/config
save = True
init = stock,hr
update = account,website
without_demo = True
import_partial = /tmp/import-partial
with_demo = True
pidfile = /tmp/pidfile
addons_path = /tmp/odoo
upgrade_path = /tmp/upgrade
@ -35,9 +33,6 @@ x_sendfile = True
dbfilter = .*
# testing
test_file = /tmp/file-file
test_enable = True
test_tags = :TestMantra.test_is_extra_mile_done
screencasts = /tmp/screencasts
screenshots = /tmp/screenshots
@ -76,19 +71,13 @@ db_replica_port = 2038
# i18n
load_language = fr_FR
language = fr_FR
translate_out = /tmp/translate_out.csv
translate_in = /tmp/translate_in.csv
overwrite_existing_translations = True
translate_modules = stock,hr,mail
overwrite_existing_translations = False
# security
list_db = False
# advanced
dev_mode = xml
shell_interface = ipython
stop_after_init = True
osv_memory_count_limit = 71
transient_age_limit = 4.0
max_cron_threads = 4

View file

@ -1,40 +1,40 @@
[options]
addons_path = {root_path}/odoo/addons,{root_path}/addons
addons_path =
admin_passwd = admin
csv_internal_sep = ,
data_dir = {homedir}/.local/share/Odoo
db_host = False
db_app_name = odoo-{pid}
db_host =
db_maxconn = 64
db_maxconn_gevent = False
db_name = False
db_password = False
db_port = False
db_replica_host = False
db_replica_port = False
db_maxconn_gevent = None
db_name =
db_password =
db_port = None
db_replica_host = None
db_replica_port = None
db_sslmode = prefer
db_template = template0
db_user = False
db_user =
dbfilter =
email_from = False
from_filter = False
email_from =
from_filter =
geoip_city_db = /usr/share/GeoIP/GeoLite2-City.mmdb
geoip_country_db = /usr/share/GeoIP/GeoLite2-Country.mmdb
gevent_port = 8072
http_enable = True
http_interface =
http_interface = 0.0.0.0
http_port = 8069
import_partial =
limit_memory_hard = 2684354560
limit_memory_hard_gevent = False
limit_memory_hard_gevent = None
limit_memory_soft = 2147483648
limit_memory_soft_gevent = False
limit_memory_soft_gevent = None
limit_request = 65536
limit_time_cpu = 60
limit_time_real = 120
limit_time_real_cron = -1
limit_time_worker_cron = 0
list_db = True
log_db = False
log_db =
log_db_level = warning
log_handler = :INFO
log_level = info
@ -48,25 +48,21 @@ proxy_mode = False
reportgz = False
screencasts =
screenshots = /tmp/odoo_tests
server_wide_modules = base,web
smtp_password = False
server_wide_modules = base,rpc,web
smtp_password =
smtp_port = 25
smtp_server = localhost
smtp_ssl = False
smtp_ssl_certificate_filename = False
smtp_ssl_private_key_filename = False
smtp_user = False
smtp_ssl_certificate_filename =
smtp_ssl_private_key_filename =
smtp_user =
syslog = False
test_enable = False
test_file =
test_tags = None
transient_age_limit = 1.0
translate_modules = ['all']
unaccent = False
upgrade_path =
websocket_keep_alive_timeout = 3600
websocket_rate_limit_burst = 10
websocket_rate_limit_delay = 0.2
without_demo = False
with_demo = False
workers = 0
x_sendfile = False

View file

@ -0,0 +1,3 @@
[options]
syslog = True
logfile = /var/log/odoo.log

View file

@ -0,0 +1,9 @@
<Document>
<Header>
<DocumentName>Test Document</DocumentName>
</Header>
<Body>
<Name>Jerry</Name>
<ForeName>Khan</ForeName>
</Body>
</Document>

View file

@ -0,0 +1,6 @@
<Document>
<Header>
<DocumentName t-out="document_name"/>
</Header>
<t t-call="base/tests/file_template/templates/subdir/file_subtemplate.xml"/>
</Document>

View file

@ -0,0 +1,4 @@
<Body>
<Name t-out="partner['name']"/>
<ForeName t-out="partner['forename']"/>
</Body>

View file

@ -0,0 +1,4 @@
<Body>
<Name t-out="partner['name']"/>
<ForeName t-out="partner['forename']"/>
</Body>

View file

@ -0,0 +1 @@
message = 'Hello from Python!'

View file

@ -81,7 +81,7 @@ class TestACL(TransactionCaseWithUserDemo):
"Label for 'decimal_places' must not be found in view definition")
# Make demo user a member of the restricted group and check that the field is back
self.test_group.users += self.user_demo
self.test_group.user_ids += self.user_demo
has_group_test = self.user_demo.has_group(self.TEST_GROUP)
fields = currency.fields_get([])
form_view = currency.get_view(primary.id, 'form')
@ -117,7 +117,7 @@ class TestACL(TransactionCaseWithUserDemo):
partner.write({'bank_ids': []})
# Add the restricted group, and check that it works again
self.test_group.users += self.user_demo
self.test_group.user_ids += self.user_demo
has_group_test = self.user_demo.has_group(self.TEST_GROUP)
self.assertTrue(has_group_test, "`demo` user should now belong to the restricted group")
self.assertTrue(partner.read(['bank_ids']))
@ -186,7 +186,7 @@ class TestACL(TransactionCaseWithUserDemo):
self._set_field_groups(Partner, 'email', self.TEST_GROUP)
views = Partner.with_user(self.user_demo).get_views([(False, 'form')])
self.assertFalse('email' in views['models']['res.partner']["fields"])
self.user_demo.groups_id = [Command.link(self.test_group.id)]
self.user_demo.group_ids = [Command.link(self.test_group.id)]
views = Partner.with_user(self.user_demo).get_views([(False, 'form')])
self.assertTrue('email' in views['models']['res.partner']["fields"])
@ -280,7 +280,7 @@ class TestIrRule(TransactionCaseWithUserDemo):
# create a new group with demo user in it, and a complex rule
group_test = self.env['res.groups'].create({
'name': 'Test Group',
'users': [Command.set(self.user_demo.ids)],
'user_ids': [Command.set(self.user_demo.ids)],
})
# add the rule to the new group, with a domain containing an implicit

View file

@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, Command
from odoo import models, Command
from odoo.addons.base.tests.common import SavepointCaseWithUserDemo
from odoo.tools import mute_logger, unique, lazy
from odoo.tools.constants import PREFETCH_MAX
from odoo.exceptions import AccessError
@ -100,7 +101,7 @@ class TestAPI(SavepointCaseWithUserDemo):
user = self.env.user
self.assertIsRecord(user, 'res.users')
self.assertIsRecord(user.partner_id, 'res.partner')
self.assertIsRecordset(user.groups_id, 'res.groups')
self.assertIsRecordset(user.group_ids, 'res.groups')
for name, field in self.partners._fields.items():
if field.type == 'many2one':
@ -134,8 +135,8 @@ class TestAPI(SavepointCaseWithUserDemo):
self.assertIs(partner.parent_id.user_id.name, False)
self.assertFalse(partner.parent_id.user_id.groups_id)
self.assertIsRecordset(partner.parent_id.user_id.groups_id, 'res.groups')
self.assertFalse(partner.parent_id.user_id.group_ids)
self.assertIsRecordset(partner.parent_id.user_id.group_ids, 'res.groups')
@mute_logger('odoo.models')
def test_40_new_new(self):
@ -208,21 +209,12 @@ class TestAPI(SavepointCaseWithUserDemo):
demo_partner.company_id.write({'name': 'Pricks'})
# remove demo user from all groups
demo.write({'groups_id': [Command.clear()]})
demo.write({'group_ids': [Command.clear()]})
# demo user can no longer access partner data
with self.assertRaises(AccessError):
demo_partner.company_id.name
def test_56_environment_uid_origin(self):
"""Check the expected behavior of `env.uid_origin`"""
user_demo = self.user_demo
user_admin = self.env.ref('base.user_admin')
self.assertEqual(self.env.uid_origin, None)
self.assertEqual(self.env['base'].with_user(user_demo).env.uid_origin, user_demo.id)
self.assertEqual(self.env['base'].with_user(user_demo).with_user(user_admin).env.uid_origin, user_demo.id)
self.assertEqual(self.env['base'].with_user(user_admin).with_user(user_demo).env.uid_origin, user_admin.id)
@mute_logger('odoo.models')
def test_60_cache(self):
""" Check the record cache behavior """
@ -287,7 +279,7 @@ class TestAPI(SavepointCaseWithUserDemo):
@mute_logger('odoo.models')
def test_60_prefetch(self):
""" Check the record cache prefetching """
partners = self.env['res.partner'].search([('id', 'in', self.partners.ids)], limit=models.PREFETCH_MAX)
partners = self.env['res.partner'].search([('id', 'in', self.partners.ids)], limit=PREFETCH_MAX)
self.assertTrue(len(partners) > 1)
# all the records in partners are ready for prefetching
@ -322,7 +314,7 @@ class TestAPI(SavepointCaseWithUserDemo):
@mute_logger('odoo.models')
def test_60_prefetch_model(self):
""" Check the prefetching model. """
partners = self.env['res.partner'].search([('id', 'in', self.partners.ids)], limit=models.PREFETCH_MAX)
partners = self.env['res.partner'].search([('id', 'in', self.partners.ids)], limit=PREFETCH_MAX)
self.assertTrue(partners)
def same_prefetch(a, b):
@ -467,9 +459,9 @@ class TestAPI(SavepointCaseWithUserDemo):
self.assertTrue(p1 in ps)
with self.assertRaisesRegex(TypeError, r"unsupported operand types in: 42 in res\.partner.*"):
42 in ps
_ = 42 in ps
with self.assertRaisesRegex(TypeError, r"inconsistent models in: ir\.ui\.menu.* in res\.partner.*"):
self.env['ir.ui.menu'] in ps
_ = self.env['ir.ui.menu'] in ps
@mute_logger('odoo.models')
def test_80_lazy_contains(self):
@ -479,9 +471,9 @@ class TestAPI(SavepointCaseWithUserDemo):
self.assertTrue(p1 in ps)
with self.assertRaisesRegex(TypeError, r"unsupported operand types in: 42 in res\.partner.*"):
lazy(lambda: 42) in ps
_ = lazy(lambda: 42) in ps
with self.assertRaisesRegex(TypeError, r"inconsistent models in: ir\.ui\.menu.* in res\.partner.*"):
lazy(lambda: self.env['ir.ui.menu']) in ps
_ = lazy(lambda: self.env['ir.ui.menu']) in ps
@mute_logger('odoo.models')
def test_80_set_operations(self):
@ -521,23 +513,23 @@ class TestAPI(SavepointCaseWithUserDemo):
self.assertNotEqual(ps, ms)
with self.assertRaisesRegex(TypeError, r"unsupported operand types in: res\.partner.* \+ 'string'"):
ps + 'string'
_ = ps + 'string'
with self.assertRaisesRegex(TypeError, r"inconsistent models in: res\.partner.* \+ ir\.ui\.menu.*"):
ps + ms
_ = ps + ms
with self.assertRaisesRegex(TypeError, r"inconsistent models in: res\.partner.* - ir\.ui\.menu.*"):
ps - ms
_ = ps - ms
with self.assertRaisesRegex(TypeError, r"inconsistent models in: res\.partner.* & ir\.ui\.menu.*"):
ps & ms
_ = ps & ms
with self.assertRaisesRegex(TypeError, r"inconsistent models in: res\.partner.* \| ir\.ui\.menu.*"):
ps | ms
_ = ps | ms
with self.assertRaises(TypeError):
ps < ms
_ = ps < ms
with self.assertRaises(TypeError):
ps <= ms
_ = ps <= ms
with self.assertRaises(TypeError):
ps > ms
_ = ps > ms
with self.assertRaises(TypeError):
ps >= ms
_ = ps >= ms
@mute_logger('odoo.models')
def test_80_lazy_set_operations(self):
@ -577,23 +569,23 @@ class TestAPI(SavepointCaseWithUserDemo):
self.assertNotEqual(ps, ms)
with self.assertRaisesRegex(TypeError, r"unsupported operand types in: res\.partner.* \+ 'string'"):
ps + 'string'
_ = ps + 'string'
with self.assertRaisesRegex(TypeError, r"inconsistent models in: res\.partner.* \+ ir\.ui\.menu.*"):
ps + ms
_ = ps + ms
with self.assertRaisesRegex(TypeError, r"inconsistent models in: res\.partner.* - ir\.ui\.menu.*"):
ps - ms
_ = ps - ms
with self.assertRaisesRegex(TypeError, r"inconsistent models in: res\.partner.* & ir\.ui\.menu.*"):
ps & ms
_ = ps & ms
with self.assertRaisesRegex(TypeError, r"inconsistent models in: res\.partner.* \| ir\.ui\.menu.*"):
ps | ms
_ = ps | ms
with self.assertRaises(TypeError):
ps < ms
_ = ps < ms
with self.assertRaises(TypeError):
ps <= ms
_ = ps <= ms
with self.assertRaises(TypeError):
ps > ms
_ = ps > ms
with self.assertRaises(TypeError):
ps >= ms
_ = ps >= ms
@mute_logger('odoo.models')
def test_80_filter(self):
@ -659,38 +651,6 @@ class TestAPI(SavepointCaseWithUserDemo):
by_name_ids = [p.id for p in sorted(ps, key=lambda p: p.name, reverse=True)]
self.assertEqual(ps.sorted('name', reverse=True).ids, by_name_ids)
# sorted doesn't filter out new records but don't sort them either (limitation)
new_p = self.env['res.partner'].new({
'child_ids': [
Command.create({'name': 'z'}),
Command.create({'name': 'a'}),
],
})
self.assertEqual(len(new_p.child_ids.sorted()), 2)
# sorted keeps the _prefetch_ids
partners_with_children = self.env['res.partner'].create([
{
'name': 'required',
'child_ids': [
Command.create({'name': 'z'}),
Command.create({'name': 'a'}),
],
},
{
'name': 'required',
'child_ids': [
Command.create({'name': 'z'}),
Command.create({'name': 'a'}),
],
},
])
partners_with_children.invalidate_model(['name'])
# Only one query to fetch name of children of each partner
with self.assertQueryCount(1):
for partner in partners_with_children:
partner.child_ids.sorted('id').mapped('name')
def test_group_on(self):
p0, p1, p2 = self.env['res.partner'].create([
{'name': "bob", 'function': "guest"},
@ -712,20 +672,9 @@ class TestAPI(SavepointCaseWithUserDemo):
with self.subTest("Should allow cross-group prefetching"):
byfn = (p0 | p1 | p2).grouped('function')
self.env.invalidate_all(flush=False)
self.assertFalse(self.env.cache._data, "ensure the cache is empty")
self.assertFalse(self.env.transaction.field_data, "ensure the cache is empty")
self.assertEqual(byfn['guest'].mapped('name'), ['bob', 'rhod'])
# name should have been prefetched by previous statement (on guest
# group), so should be nothing here
with self.assertQueries([]):
_ = byfn['host'].name
class TestExternalAPI(SavepointCaseWithUserDemo):
def test_call_kw(self):
"""kwargs is not modified by the execution of the call"""
partner = self.env['res.partner'].create({'name': 'MyPartner1'})
args = (partner.ids, ['name'])
kwargs = {'context': {'test': True}}
api.call_kw(self.env['res.partner'], 'read', args, kwargs)
self.assertEqual(kwargs, {'context': {'test': True}})

View file

@ -31,7 +31,8 @@ class TestAvatarMixin(TransactionCase):
self.external_partner = self.env['res.partner'].create({
'name': 'Josh Demo',
'email': 'josh.brown23@example.com',
'image_1920': False
'image_1920': False,
'create_date': '2015-11-12 00:00:00',
})
def test_partner_has_avatar_even_if_it_has_no_image(self):
@ -55,7 +56,14 @@ class TestAvatarMixin(TransactionCase):
self.assertEqual(self.user_without_name.partner_id._avatar_get_placeholder(), b64decode(self.user_without_name.partner_id.avatar_1920))
def test_external_partner_has_default_placeholder_image_as_avatar(self):
self.assertEqual(self.external_partner._avatar_get_placeholder(), b64decode(self.external_partner.avatar_1920))
expectedAvatar = (
"<?xml version='1.0' encoding='UTF-8' ?>"
"<svg height='180' width='180' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>"
"<rect fill='hsl(71, 48%, 45%)' height='180' width='180'/>"
"<text fill='#ffffff' font-size='96' text-anchor='middle' x='90' y='125' font-family='sans-serif'>J</text>"
"</svg>"
)
self.assertEqual(expectedAvatar, b64decode(self.external_partner.avatar_1920).decode('utf-8'))
def test_partner_and_user_have_the_same_avatar(self):
self.assertEqual(self.user_without_image.partner_id.avatar_1920, self.user_without_image.avatar_1920)

View file

@ -9,7 +9,6 @@ from odoo import Command
from odoo.tests.common import TransactionCase, BaseCase
from odoo.tools import mute_logger
from odoo.tools.safe_eval import safe_eval, const_eval, expr_eval
from odoo.addons.base.tests.common import TransactionCaseWithUserDemo
class TestSafeEval(BaseCase):
@ -35,17 +34,17 @@ class TestSafeEval(BaseCase):
self.assertEqual(expr_eval(expr), expected)
def test_safe_eval_opcodes(self):
for expr, locals_dict, expected in [
for expr, context, expected in [
('[x for x in (1,2)]', {}, [1, 2]), # LOAD_FAST_AND_CLEAR
('list(x for x in (1,2))', {}, [1, 2]), # END_FOR, CALL_INTRINSIC_1
('v if v is None else w', {'v': False, 'w': 'foo'}, 'foo'), # POP_JUMP_IF_NONE
('v if v is not None else w', {'v': None, 'w': 'foo'}, 'foo'), # POP_JUMP_IF_NOT_NONE
('{a for a in (1, 2)}', {}, {1, 2}), # RERAISE
]:
self.assertEqual(safe_eval(expr, locals_dict=locals_dict), expected)
self.assertEqual(safe_eval(expr, context), expected)
def test_safe_eval_exec_opcodes(self):
for expr, locals_dict, expected in [
for expr, context, expected in [
("""
def f(v):
if v:
@ -54,8 +53,47 @@ class TestSafeEval(BaseCase):
result = f(42)
""", {}, 1), # LOAD_FAST_CHECK
]:
safe_eval(dedent(expr), locals_dict=locals_dict, mode="exec", nocopy=True)
self.assertEqual(locals_dict['result'], expected)
safe_eval(dedent(expr), context, mode="exec")
self.assertEqual(context['result'], expected)
def test_safe_eval_strips(self):
# cpython strips spaces and tabs by default since 3.10
# https://github.com/python/cpython/commit/e799aa8b92c195735f379940acd9925961ad04ec
# but we need to strip all whitespaces
for expr, expected in [
# simple ascii
("\n 1 + 2", 3),
("\n\n\t 1 + 2 \n", 3),
(" 1 + 2", 3),
("1 + 2 ", 3),
# Unicode (non-ASCII spaces)
("\u00A01 + 2\u00A0", 3), # nbsp
]:
self.assertEqual(safe_eval(expr), expected)
def test_runs_top_level_scope(self):
# when we define var in top-level scope, it should become available in locals and globals
# such that f's frame will be able to access var too.
expr = dedent("""
var = 1
def f():
return var
f()
""")
safe_eval(expr, mode="exec")
safe_eval(expr, context={}, mode="exec")
def test_safe_eval_ctx_mutation(self):
# simple eval also has side-effect on context
expr = '(answer := 42)'
context = {}
self.assertEqual(safe_eval(expr, context), 42)
self.assertEqual(context, {'answer': 42})
def test_safe_eval_ctx_no_builtins(self):
ctx = {'__builtins__': {'max': min}}
self.assertEqual(safe_eval('max(1, 2)', ctx), 2)
def test_01_safe_eval(self):
""" Try a few common expressions to verify they work with safe_eval """
@ -160,22 +198,64 @@ class TestParentStore(TransactionCase):
class TestGroups(TransactionCase):
def test_res_groups_fullname_search(self):
monkey = self.env['res.groups.privilege'].create({'name': 'Monkey'})
self.env['res.groups']._load_records([{
'xml_id': 'base.test_monkey_banana',
'values': {'name': 'Banana', 'privilege_id': monkey.id},
}, {
'xml_id': 'base.test_monkey_stuff',
'values': {'name': 'Stuff', 'privilege_id': monkey.id},
}, {
'xml_id': 'base.test_monkey_administrator',
'values': {'name': 'Administrator', 'privilege_id': monkey.id},
}, {
'xml_id': 'base.test_donky',
'values': {'name': 'Donky Monkey'},
}])
all_groups = self.env['res.groups'].search([])
groups = all_groups.search([('full_name', 'like', 'Sale')])
self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Sale' in g.full_name],
"did not match search for 'Sale'")
groups = all_groups.search([('full_name', 'like', 'Technical')])
self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Technical' in g.full_name],
"did not match search for 'Technical'")
groups = all_groups.search([('full_name', 'like', 'Master Data')])
self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Master Data' in g.full_name],
"did not match search for 'Master Data'")
groups = all_groups.search([('full_name', 'like', 'Sales /')])
self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Sales /' in g.full_name],
"did not match search for 'Sales /'")
groups = all_groups.search([('full_name', 'like', 'Monkey/Banana')])
self.assertItemsEqual(groups.mapped('full_name'), ['Monkey / Banana'],
"did not match search for 'Monkey/Banana'")
groups = all_groups.search([('full_name', 'in', ['Administration / Access Rights','Contact Creation'])])
self.assertTrue(groups, "did not match search for 'Administration / Access Rights' and 'Contact Creation'")
groups = all_groups.search([('full_name', 'like', 'Monkey /')])
self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Monkey /' in g.full_name],
"did not match search for 'Monkey /'")
groups = all_groups.search([('full_name', 'like', 'Monk /')])
self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Monkey /' in g.full_name],
"did not match search for 'Monk /'")
groups = all_groups.search([('full_name', 'like', 'Monk')])
self.assertItemsEqual(groups.ids, [g.id for g in all_groups if 'Monk' in g.full_name],
"did not match search for 'Monk'")
groups = all_groups.search([('full_name', 'in', ['Creation'])])
self.assertItemsEqual(groups.mapped('full_name'), ['Contact / Creation'])
groups = all_groups.search([('full_name', 'in', ['Role / Administrator', 'Creation'])])
self.assertItemsEqual(groups.mapped('full_name'), ['Contact / Creation', 'Role / Administrator'])
groups = all_groups.search([('full_name', 'like', 'Admin')])
self.assertItemsEqual(groups.mapped('full_name'), [g.full_name for g in all_groups if 'Admin' in g.full_name])
groups = all_groups.search([('full_name', 'not like', 'Role /')])
self.assertItemsEqual(groups.mapped('full_name'), [g.full_name for g in all_groups if 'Role /' not in g.full_name])
groups = all_groups.search([('full_name', '=', False)])
self.assertFalse(groups)
groups = all_groups.search([('full_name', '!=', False)])
self.assertEqual(groups, all_groups)
groups = all_groups.search([('full_name', 'like', '/')])
self.assertTrue(groups, "did not match search for '/'")
@ -211,25 +291,24 @@ class TestGroups(TransactionCase):
def test_remove_groups(self):
u1 = self.env['res.users'].create({'login': 'u1', 'name': 'U1'})
u2 = self.env['res.users'].create({'login': 'u2', 'name': 'U2'})
default = self.env.ref('base.default_user')
portal = self.env.ref('base.group_portal')
p = self.env['res.users'].create({'login': 'p', 'name': 'P', 'groups_id': [Command.set([portal.id])]})
p = self.env['res.users'].create({'login': 'p', 'name': 'P', 'group_ids': [Command.set([portal.id])]})
a = self.env['res.groups'].create({'name': 'A', 'users': [Command.set(u1.ids)]})
b = self.env['res.groups'].create({'name': 'B', 'users': [Command.set(u1.ids)]})
c = self.env['res.groups'].create({'name': 'C', 'implied_ids': [Command.set(a.ids)], 'users': [Command.set([p.id, u2.id, default.id])]})
d = self.env['res.groups'].create({'name': 'D', 'implied_ids': [Command.set(a.ids)], 'users': [Command.set([u2.id, default.id])]})
a = self.env['res.groups'].create({'name': 'A', 'user_ids': [Command.set(u1.ids)]})
b = self.env['res.groups'].create({'name': 'B', 'user_ids': [Command.set(u1.ids)]})
c = self.env['res.groups'].create({'name': 'C', 'implied_ids': [Command.set(a.ids)], 'user_ids': [Command.set([p.id, u2.id])]})
d = self.env['res.groups'].create({'name': 'D', 'implied_ids': [Command.set(a.ids)], 'user_ids': [Command.set([u2.id])]})
def assertUsersEqual(users, group):
self.assertEqual(
sorted([r.login for r in users]),
sorted([r.login for r in group.with_context(active_test=False).users])
sorted(group.with_context(active_test=False).mapped('all_user_ids.login'))
)
# sanity checks
assertUsersEqual([u1, u2, p, default], a)
assertUsersEqual([u1, u2, p], a)
assertUsersEqual([u1], b)
assertUsersEqual([u2, p, default], c)
assertUsersEqual([u2, default], d)
assertUsersEqual([u2, p], c)
assertUsersEqual([u2], d)
# C already implies A, we want none of B+C to imply A
(b + c)._remove_group(a)
@ -243,15 +322,14 @@ class TestGroups(TransactionCase):
# not have U1 as a user
# - P should be removed as was only added via inheritance to C
# - U2 should not be removed from A since it is implied via C but also via D
assertUsersEqual([u1, u2, default], a)
assertUsersEqual([u1, u2], a)
assertUsersEqual([u1], b)
assertUsersEqual([u2, p, default], c)
assertUsersEqual([u2, default], d)
assertUsersEqual([u2, p], c)
assertUsersEqual([u2], d)
# When adding the template user to a new group, it should add it to existing internal users
# When adding a new group to all users
e = self.env['res.groups'].create({'name': 'E'})
default.write({'groups_id': [Command.link(e.id)]})
self.assertIn(u1, e.users)
self.assertIn(u2, e.users)
self.assertIn(default, e.with_context(active_test=False).users)
self.assertNotIn(p, e.users)
self.env.ref('base.group_user').write({'implied_ids': [Command.link(e.id)]})
self.assertIn(u1, e.all_user_ids)
self.assertIn(u2, e.all_user_ids)
self.assertNotIn(p, e.all_user_ids)

View file

@ -115,7 +115,7 @@ class TestRecordCache(TransactionCaseWithUserDemo):
rss0 = process.memory_info().rss
char_names = [
'name', 'display_name', 'email', 'website', 'phone', 'mobile',
'name', 'display_name', 'email', 'website', 'phone',
'street', 'street2', 'city', 'zip', 'vat', 'ref',
]
for name in char_names:

View file

@ -0,0 +1,186 @@
import io
import os
import re
import subprocess as sp
import sys
import textwrap
import time
import unittest
from pathlib import Path
from odoo.cli.command import commands, load_addons_commands, load_internal_commands
from odoo.tests import BaseCase, TransactionCase
from odoo.tools import config, file_path
class TestCommand(BaseCase):
"""Smoke tests for the ``odoo-bin`` command-line interface, driven through subprocesses."""
@classmethod
def setUpClass(cls):
super().setUpClass()
# odoo-bin is looked up 4 directory levels above this file — presumably
# the repository root; TODO confirm if the test layout ever changes.
cls.odoo_bin = Path(__file__).parents[4].resolve() / 'odoo-bin'
addons_path = config.format('addons_path', config['addons_path'])
# Base argv shared by every test: current interpreter + odoo-bin + addons path.
cls.run_args = (sys.executable, cls.odoo_bin, f'--addons-path={addons_path}')
def run_command(self, *args, check=True, capture_output=True, text=True, **kwargs):
# Run odoo-bin with *args* to completion and return the CompletedProcess.
return sp.run(
[*self.run_args, *args],
capture_output=capture_output,
check=check,
text=text,
**kwargs
)
def popen_command(self, *args, capture_output=True, text=True, **kwargs):
# Start odoo-bin with *args* without waiting; the caller owns the process.
# capture_output is emulated for Popen by wiring both pipes explicitly.
if capture_output:
kwargs['stdout'] = kwargs['stderr'] = sp.PIPE
return sp.Popen(
[*self.run_args, *args],
text=text,
**kwargs
)
def test_docstring(self):
# Every registered command must expose a short single-line docstring,
# because 'odoo-bin help' renders it in the command listing.
load_internal_commands()
load_addons_commands()
for name, cmd in commands.items():
self.assertTrue(cmd.__doc__,
msg=f"Command {name} needs a docstring to be displayed with 'odoo-bin help'")
self.assertFalse('\n' in cmd.__doc__ or len(cmd.__doc__) > 120,
msg=f"Command {name}'s docstring format is invalid for 'odoo-bin help'")
def test_unknown_command(self):
# Unknown command names (including non-ASCII ones) must yield a friendly
# error on stderr rather than a traceback.
for name in ('bonbon', 'café'):
with self.subTest(name):
command_output = self.run_command(name, check=False).stderr.strip()
self.assertEqual(
command_output,
f"Unknown command '{name}'.\n"
"Use 'odoo-bin --help' to see the list of available commands."
)
def test_help(self):
# The help screen must list at least these built-in commands.
expected = {
'cloc',
'db',
'deploy',
'help',
'neutralize',
'obfuscate',
'populate',
'scaffold',
'server',
'shell',
'start',
'upgrade_code',
}
for option in ('help', '-h', '--help'):
with self.subTest(option=option):
actual = set()
for line in self.run_command(option).stdout.splitlines():
# Command entries are indented lines shaped like "  name   description".
if line.startswith(" ") and (result := re.search(r' (\w+)\s+(\w.*)$', line)):
actual.add(result.groups()[0])
self.assertGreaterEqual(actual, expected, msg="Help is not showing required commands")
def test_help_subcommand(self):
"""Just execute the help for each internal sub-command"""
load_internal_commands()
for name in commands:
with self.subTest(command=name):
self.run_command(name, '--help', timeout=10)
def test_upgrade_code_example(self):
# The shipped example script must be a no-op on a clean tree.
proc = self.run_command('upgrade_code', '--script', '17.5-00-example', '--dry-run')
self.assertFalse(proc.stdout, "there should be no file modified by the example script")
self.assertFalse(proc.stderr)
def test_upgrade_code_help(self):
proc = self.run_command('upgrade_code', '--help')
self.assertIn("usage: ", proc.stdout)
self.assertIn("Rewrite the entire source code", proc.stdout)
self.assertFalse(proc.stderr)
def test_upgrade_code_standalone(self):
# upgrade_code must also be runnable directly as a script, outside odoo-bin.
from odoo.cli import upgrade_code  # noqa: PLC0415
proc = sp.run(
[sys.executable, upgrade_code.__file__, '--help'],
check=True, capture_output=True, text=True
)
self.assertIn("usage: ", proc.stdout)
self.assertIn("Rewrite the entire source code", proc.stdout)
self.assertFalse(proc.stderr)
@unittest.skipIf(os.name != 'posix', '`os.openpty` only available on POSIX systems')
def test_shell(self):
# Drive the interactive shell through a pseudo-terminal; 'message' is
# presumably defined by base/tests/shell_file.txt — confirm against that file.
main, child = os.openpty()
shell = self.popen_command(
'shell',
'--shell-interface=python',
'--shell-file', file_path('base/tests/shell_file.txt'),
stdin=main,
close_fds=True,
)
with os.fdopen(child, 'w', encoding="utf-8") as stdin_file:
stdin_file.write(
'print(message)\n'
'exit()\n'
)
self.assertFalse(shell.wait(), "exited with a non 0 code")
# we skip local variables as they differ based on configuration (e.g.: if a database is specified or not)
lines = [line for line in shell.stdout.read().splitlines() if line.startswith('>>>')]
self.assertEqual(lines, [">>> Hello from Python!", '>>> '])
class TestCommandUsingDb(TestCommand, TransactionCase):
"""CLI tests that need a real database (provided by TransactionCase)."""
@unittest.skipIf(
os.name != 'posix' and sys.version_info < (3, 12),
"os.set_blocking on files only available in windows starting 3.12",
)
def test_i18n_export(self):
# i18n export is a process that takes a long time to run, we are
# not interested in running it in full, we are only interested
# in making sure it starts correctly.
#
# This test only asserts the first few lines and then SIGTERM
# the process. We took the challenge to write a cross-platform
# test, the lack of a select-like API for Windows makes the code
# a bit complicated. Sorry :/
expected_text = textwrap.dedent("""\
# Translation of Odoo Server.
# This file contains the translation of the following modules:
# \t* base
""").encode()
proc = self.popen_command(
'i18n', 'export', '-d', self.env.cr.dbname, '-o', '-', 'base',
# ensure we get a raw io.FileIO, not a buffered or text wrapper
text=False, bufsize=0,
)
# Feed the buffer for maximum 15 seconds.
buffer = io.BytesIO()
timeout = time.monotonic() + 15
os.set_blocking(proc.stdout.fileno(), False)
while buffer.tell() < len(expected_text) and time.monotonic() < timeout:
if chunk := proc.stdout.read(len(expected_text) - buffer.tell()):
buffer.write(chunk)
else:
# would have loved to use select() for its timeout, but
# select doesn't work on files on windows, use a flat
# sleep instead: not great, not terrible.
time.sleep(.1)
proc.terminate()
try:
proc.wait(timeout=5)
except sp.TimeoutExpired:
# the process ignored SIGTERM: escalate to SIGKILL and fail the test
proc.kill()
raise
self.assertEqual(buffer.getvalue(), expected_text,
"The subprocess did not write the prelude in under 15 seconds.")

View file

@ -1,13 +1,19 @@
import os
import unittest
from unittest.mock import call, patch
import odoo
from odoo.tests import TransactionCase
from odoo.tools import file_path, file_open, file_open_temporary_directory
from odoo.tools.config import conf, configmanager, _get_default_datadir
from odoo.tools import file_open, file_open_temporary_directory, file_path
from odoo.tools.config import configmanager
EMPTY_CONFIG_PATH = file_path('base/tests/config/empty.conf')
PROJECT_PATH = odoo.tools.config.root_path.removesuffix('/odoo')
DEFAULT_DATADIR = odoo.tools.config._default_options['data_dir']
IS_POSIX = 'workers' in odoo.tools.config.options
ROOT_PATH = odoo.tools.config.options['root_path'].removesuffix('/odoo')
MISSING_HTTP_INTERFACE = """\
WARNING:odoo.tools.config:missing --http-interface/http_interface, \
using 0.0.0.0 by default, will change to 127.0.0.1 in 20.0"""
class TestConfigManager(TransactionCase):
@ -15,42 +21,79 @@ class TestConfigManager(TransactionCase):
def setUp(self):
super().setUp()
# _parse_config() as the side-effect of changing those two
# values, make sure the original value is restored at the end.
self.patch(conf, 'addons_paths', odoo.conf.addons_paths)
self.patch(conf, 'server_wide_modules', odoo.conf.server_wide_modules)
patcher = patch.dict('os.environ', {'ODOO_RC': EMPTY_CONFIG_PATH}, clear=True)
patcher.start()
self.addCleanup(patcher.stop)
self.config = configmanager()
def parse_reset(self, args=None):
with (
patch.dict(self.config._runtime_options, {}),
patch.dict(self.config._cli_options, {}),
patch.dict(self.config._env_options, {}),
patch.dict(self.config._file_options, {}),
patch.dict(self.config._default_options, {}),
):
cli = self.config._parse_config(args)
return cli, dict(self.config.options)
def assertConfigEqual(self, truth):
try:
self.assertEqual(dict(self.config.options), truth)
except AssertionError as exc1:
for k in set(self.config.options).intersection(truth):
try:
self.assertEqual(self.config.options[k], truth[k], f"{k!r} doesn't match")
except AssertionError as exc2:
if hasattr(Exception, 'add_note'): # 3.11
exc2.add_note(str(self.config._get_sources(k)))
raise exc2 from exc1
raise AssertionError(f"{exc2.args[0]}\n{self.config._get_sources(k)}") from exc1
if missing := set(self.config.options).difference(truth):
e = "missing from the test dict: " + ', '.join(missing)
raise AssertionError(e) from exc1
if missing := set(truth).difference(self.config.options):
e = "missing from the configuration: " + ', '.join(missing)
raise AssertionError(e) from exc1
raise
def test_00_setUp(self):
self.assertEqual(self.config.options['config'], EMPTY_CONFIG_PATH)
def test_01_default_config(self):
config = configmanager(fname=file_path('base/tests/config/empty.conf'))
default_values = {
self.assertConfigEqual({
# options not exposed on the command line
'admin_passwd': 'admin',
'bin_path': '',
'csv_internal_sep': ',',
'default_productivity_apps': False,
'proxy_access_token': '',
'publisher_warranty_url': 'http://services.odoo.com/publisher-warranty/',
'reportgz': False,
'root_path': f'{ROOT_PATH}/odoo',
'websocket_rate_limit_burst': 10,
'websocket_rate_limit_delay': 0.2,
'websocket_keep_alive_timeout': 3600,
# common
'config': None,
'save': None,
'config': EMPTY_CONFIG_PATH,
'save': False,
'init': {},
'update': {},
'without_demo': False,
'demo': {},
'reinit': [],
'with_demo': False,
'import_file_maxbytes': 10485760,
'import_file_timeout': 3,
'import_partial': '',
'import_url_regex': '^(?:http|https)://',
'pidfile': '',
'addons_path': f'{ROOT_PATH}/odoo/addons,{ROOT_PATH}/addons',
'upgrade_path': '',
'pre_upgrade_scripts': '',
'server_wide_modules': 'base,web',
'data_dir': _get_default_datadir(),
'addons_path': [],
'upgrade_path': [],
'pre_upgrade_scripts': [],
'server_wide_modules': ['base', 'rpc', 'web'],
'data_dir': DEFAULT_DATADIR,
# HTTP
'http_interface': '',
'http_interface': '0.0.0.0',
'http_port': 8069,
'gevent_port': 8072,
'http_enable': True,
@ -71,49 +114,44 @@ class TestConfigManager(TransactionCase):
'logfile': '',
'syslog': False,
'log_handler': [':INFO'],
'log_db': False,
'log_db': '',
'log_db_level': 'warning',
'log_level': 'info',
# SMTP
'email_from': False,
'from_filter': False,
'email_from': '',
'from_filter': '',
'smtp_server': 'localhost',
'smtp_port': 25,
'smtp_ssl': False,
'smtp_user': False,
'smtp_password': False,
'smtp_ssl_certificate_filename': False,
'smtp_ssl_private_key_filename': False,
'smtp_user': '',
'smtp_password': '',
'smtp_ssl_certificate_filename': '',
'smtp_ssl_private_key_filename': '',
# database
'db_name': False,
'db_user': False,
'db_password': False,
'db_name': [],
'db_user': '',
'db_password': '',
'pg_path': '',
'db_host': False,
'db_port': False,
'db_host': '',
'db_port': None,
'db_sslmode': 'prefer',
'db_maxconn': 64,
'db_maxconn_gevent': False,
'db_maxconn_gevent': None,
'db_template': 'template0',
'db_replica_host': False,
'db_replica_port': False,
'db_replica_host': None,
'db_replica_port': None,
'db_app_name': 'odoo-{pid}',
# i18n
'load_language': None,
'language': None,
'translate_out': '',
'translate_in': '',
'overwrite_existing_translations': False,
'translate_modules': ['all'],
# security
'list_db': True,
# advanced
'dev_mode': [],
'shell_interface': None,
'stop_after_init': False,
'osv_memory_count_limit': 0,
'transient_age_limit': 1.0,
@ -122,52 +160,52 @@ class TestConfigManager(TransactionCase):
'unaccent': False,
'geoip_city_db': '/usr/share/GeoIP/GeoLite2-City.mmdb',
'geoip_country_db': '/usr/share/GeoIP/GeoLite2-Country.mmdb',
}
if IS_POSIX:
# multiprocessing
default_values.update(
{
'workers': 0,
'limit_memory_soft': 2048 * 1024 * 1024,
'limit_memory_soft_gevent': False,
'limit_memory_hard': 2560 * 1024 * 1024,
'limit_memory_hard_gevent': False,
'limit_time_cpu': 60,
'limit_time_real': 120,
'limit_time_real_cron': -1,
'limit_request': 2**16,
}
)
config._parse_config()
self.assertEqual(config.options, default_values, "Options don't match")
'workers': 0,
'limit_memory_soft': 2048 * 1024 * 1024,
'limit_memory_soft_gevent': None,
'limit_memory_hard': 2560 * 1024 * 1024,
'limit_memory_hard_gevent': None,
'limit_time_cpu': 60,
'limit_time_real': 120,
'limit_time_real_cron': -1,
'limit_request': 2**16,
})
def test_02_config_file(self):
values = {
config_path = file_path('base/tests/config/non_default.conf')
with self.assertLogs('odoo.tools.config', 'WARNING') as capture:
self.config._parse_config(['-c', config_path])
self.assertConfigEqual({
# options not exposed on the command line
'admin_passwd': 'Tigrou007',
'bin_path': '',
'csv_internal_sep': '@',
'default_productivity_apps': False,
'proxy_access_token': '',
'publisher_warranty_url': 'http://example.com', # blacklist for save, read from the config file
'reportgz': True,
'root_path': f'{ROOT_PATH}/odoo', # blacklist for save, ignored from the config file
'websocket_rate_limit_burst': '1',
'websocket_rate_limit_delay': '2',
'websocket_keep_alive_timeout': '600',
'websocket_rate_limit_burst': 1,
'websocket_rate_limit_delay': 2.0,
'websocket_keep_alive_timeout': 600,
# common
'config': '/tmp/config', # blacklist for save, read from the config file
'save': True, # blacklist for save, read from the config file
'config': config_path,
'save': False,
'init': {}, # blacklist for save, ignored from the config file
'update': {}, # blacklist for save, ignored from the config file
'without_demo': True,
'demo': {}, # blacklist for save, ignored from the config file
'import_partial': '/tmp/import-partial',
'reinit': [],
'with_demo': True,
'import_file_maxbytes': 10485760,
'import_file_timeout': 3,
'import_partial': '',
'import_url_regex': '^(?:http|https)://',
'pidfile': '/tmp/pidfile',
'addons_path': '/tmp/odoo',
'upgrade_path': '/tmp/upgrade',
'pre_upgrade_scripts': '/tmp/pre-custom.py',
'server_wide_modules': 'base,mail',
'addons_path': [], # the path found in the config file is invalid
'upgrade_path': [], # the path found in the config file is invalid
'pre_upgrade_scripts': [], # the path found in the config file is invalid
'server_wide_modules': ['web', 'base', 'mail'],
'data_dir': '/tmp/data-dir',
# HTTP
@ -182,9 +220,9 @@ class TestConfigManager(TransactionCase):
'dbfilter': '.*',
# testing
'test_file': '/tmp/file-file',
'test_enable': True,
'test_tags': ':TestMantra.test_is_extra_mile_done',
'test_file': '',
'test_enable': False,
'test_tags': None,
'screencasts': '/tmp/screencasts',
'screenshots': '/tmp/screenshots',
@ -208,7 +246,7 @@ class TestConfigManager(TransactionCase):
'smtp_ssl_private_key_filename': '/tmp/tlskey',
# database
'db_name': 'horizon',
'db_name': ['horizon'],
'db_user': 'kiwi',
'db_password': 'Tigrou0073',
'pg_path': '/tmp/pg_path',
@ -220,22 +258,18 @@ class TestConfigManager(TransactionCase):
'db_template': 'backup1706',
'db_replica_host': 'db2.localhost',
'db_replica_port': 2038,
'db_app_name': 'odoo-{pid}',
# i18n
'load_language': 'fr_FR', # blacklist for save, read from the config file
'language': 'fr_FR', # blacklist for save, read from the config file
'translate_out': '/tmp/translate_out.csv', # blacklist for save, read from the config file
'translate_in': '/tmp/translate_in.csv', # blacklist for save, read from the config file
'overwrite_existing_translations': True, # blacklist for save, read from the config file
'translate_modules': ['all'], # ignored from the config file
'overwrite_existing_translations': False, # blacklist for save, read from the config file
# security
'list_db': False,
# advanced
'dev_mode': [], # blacklist for save, ignored from the config file
'shell_interface': 'ipython', # blacklist for save, read from the config file
'stop_after_init': True, # blacklist for save, read from the config file
'dev_mode': ['xml'], # blacklist for save, read from the config file
'stop_after_init': False,
'osv_memory_count_limit': 71,
'transient_age_limit': 4.0,
'max_cron_threads': 4,
@ -243,82 +277,84 @@ class TestConfigManager(TransactionCase):
'unaccent': True,
'geoip_city_db': '/tmp/city.db',
'geoip_country_db': '/tmp/country.db',
}
if IS_POSIX:
# multiprocessing
values.update(
{
'workers': 92,
'limit_memory_soft': 1048576,
'limit_memory_soft_gevent': 1048577,
'limit_memory_hard': 1048578,
'limit_memory_hard_gevent': 1048579,
'limit_time_cpu': 60,
'limit_time_real': 61,
'limit_time_real_cron': 62,
'limit_request': 100,
}
)
'workers': 92,
'limit_memory_soft': 1048576,
'limit_memory_soft_gevent': 1048577,
'limit_memory_hard': 1048578,
'limit_memory_hard_gevent': 1048579,
'limit_time_cpu': 60,
'limit_time_real': 61,
'limit_time_real_cron': 62,
'limit_request': 100,
})
self.assertEqual(capture.output, [
"WARNING:odoo.tools.config:option addons_path, no such directory '/tmp/odoo', skipped",
"WARNING:odoo.tools.config:option upgrade_path, no such directory '/tmp/upgrade', skipped",
"WARNING:odoo.tools.config:option pre_upgrade_scripts, no such file '/tmp/pre-custom.py', skipped",
])
config_path = file_path('base/tests/config/non_default.conf')
config = configmanager(fname=config_path)
self.assertEqual(config.rcfile, config_path, "Config file path doesn't match")
config._parse_config()
self.assertEqual(config.options, values, "Options don't match")
self.assertEqual(config.rcfile, config_path)
self.assertNotEqual(config.rcfile, config['config']) # funny
@unittest.skipIf(not IS_POSIX, 'this test is POSIX only')
@unittest.skipIf(os.name != 'posix', 'this test is POSIX only')
def test_03_save_default_options(self):
with file_open_temporary_directory(self.env) as temp_dir:
config_path = f'{temp_dir}/save.conf'
config = configmanager(fname=config_path)
config._parse_config(['--config', config_path, '--save'])
self.config._parse_config(['--config', config_path, '--save'])
with (file_open(config_path, env=self.env) as config_file,
file_open('base/tests/config/save_posix.conf', env=self.env) as save_file):
config_content = config_file.read().rstrip()
save_content = save_file.read().format(
root_path=ROOT_PATH,
homedir=config._normalize('~'),
project_path=PROJECT_PATH,
homedir=self.config._normalize('~'),
empty_dict=r'{}',
pid='{pid}',
)
self.assertEqual(config_content.splitlines(), save_content.splitlines())
def test_04_odoo16_config_file(self):
# test that loading the Odoo 16.0 generated default config works
# with a modern version
config = configmanager(fname=file_path('base/tests/config/16.0.conf'))
assert_options = {
config_path = file_path('base/tests/config/16.0.conf')
with self.assertLogs('odoo.tools.config', 'WARNING') as capture:
self.config._parse_config(['--config', config_path])
with (
self.assertNoLogs('py.warnings'),
self.assertLogs('odoo.tools.config', 'WARNING') as capture_warn,
):
self.config._warn_deprecated_options()
self.assertConfigEqual({
# options taken from the configuration file
'admin_passwd': 'admin',
'config': config_path,
'csv_internal_sep': ',',
'db_host': False,
'db_host': '',
'db_maxconn': 64,
'db_name': False,
'db_password': False,
'db_port': False,
'db_name': [],
'db_password': '',
'db_port': None,
'db_sslmode': 'prefer',
'db_template': 'template0',
'db_user': False,
'db_user': '',
'dbfilter': '',
'demo': {},
'email_from': False,
'demo': '{}',
'email_from': '',
'geoip_city_db': '/usr/share/GeoIP/GeoLite2-City.mmdb',
'http_enable': True,
'http_interface': '',
'http_interface': '0.0.0.0',
'http_port': 8069,
'import_file_maxbytes': 10485760,
'import_file_timeout': 3,
'import_partial': '',
'import_url_regex': '^(?:http|https)://',
'list_db': True,
'load_language': None,
'log_db': False,
'log_db': '',
'log_db_level': 'warning',
'log_handler': [':INFO'],
'log_level': 'info',
'logfile': '',
'max_cron_threads': 2,
'limit_time_worker_cron': 0,
'osv_memory_count_limit': 0,
'overwrite_existing_translations': False,
'pg_path': '',
@ -327,112 +363,146 @@ class TestConfigManager(TransactionCase):
'reportgz': False,
'screencasts': '',
'screenshots': '/tmp/odoo_tests',
'server_wide_modules': 'base,web',
'smtp_password': False,
'server_wide_modules': ['base', 'web'],
'smtp_password': '',
'smtp_port': 25,
'smtp_server': 'localhost',
'smtp_ssl': False,
'smtp_user': False,
'smtp_user': '',
'syslog': False,
'test_enable': False,
'test_file': '',
'test_tags': None,
'transient_age_limit': 1.0,
'translate_modules': ['all'],
'translate_modules': "['all']",
'unaccent': False,
'update': {},
'upgrade_path': '',
'pre_upgrade_scripts': '',
'without_demo': False,
'reinit': [],
'upgrade_path': [],
'pre_upgrade_scripts': [],
'with_demo': True,
# options that are not taken from the file (also in 14.0)
'addons_path': f'{ROOT_PATH}/odoo/addons,{ROOT_PATH}/addons',
'config': None,
'data_dir': _get_default_datadir(),
'addons_path': [],
'data_dir': DEFAULT_DATADIR,
'dev_mode': [],
'geoip_database': '/usr/share/GeoIP/GeoLite2-City.mmdb',
'init': {},
'language': None,
'publisher_warranty_url': 'http://services.odoo.com/publisher-warranty/',
'save': None,
'shell_interface': None,
'save': False,
'stop_after_init': False,
'root_path': f'{ROOT_PATH}/odoo',
'translate_in': '',
'translate_out': '',
# undocumented options
'bin_path': '',
'default_productivity_apps': False,
'osv_memory_age_limit': 'False',
'proxy_access_token': '',
# multiprocessing
'workers': 0,
'limit_memory_soft': 2048 * 1024 * 1024,
'limit_memory_soft_gevent': None,
'limit_memory_hard': 2560 * 1024 * 1024,
'limit_memory_hard_gevent': None,
'limit_time_cpu': 60,
'limit_time_real': 120,
'limit_time_real_cron': -1,
'limit_request': 1 << 16,
# new options since 14.0
'db_maxconn_gevent': False,
'db_replica_host': False,
'db_replica_port': False,
'db_maxconn_gevent': None,
'db_replica_host': None,
'db_replica_port': None,
'db_app_name': 'odoo-{pid}',
'geoip_country_db': '/usr/share/GeoIP/GeoLite2-Country.mmdb',
'from_filter': False,
'from_filter': '',
'gevent_port': 8072,
'smtp_ssl_certificate_filename': False,
'smtp_ssl_private_key_filename': False,
'websocket_keep_alive_timeout': '3600',
'websocket_rate_limit_burst': '10',
'websocket_rate_limit_delay': '0.2',
'smtp_ssl_certificate_filename': '',
'smtp_ssl_private_key_filename': '',
'websocket_keep_alive_timeout': 3600,
'websocket_rate_limit_burst': 10,
'websocket_rate_limit_delay': 0.2,
'x_sendfile': False,
'limit_time_worker_cron': 0,
}
if IS_POSIX:
# multiprocessing
assert_options.update(
{
'workers': 0,
'limit_memory_soft': 2048 * 1024 * 1024,
'limit_memory_soft_gevent': False,
'limit_memory_hard': 2560 * 1024 * 1024,
'limit_memory_hard_gevent': False,
'limit_time_cpu': 60,
'limit_time_real': 120,
'limit_time_real_cron': -1,
'limit_request': 1 << 16,
}
)
})
config._parse_config()
with self.assertNoLogs('py.warnings'):
config._warn_deprecated_options()
self.assertEqual(config.options, assert_options, "Options don't match")
def missing(*options):
return [
f"WARNING:odoo.tools.config:unknown option '{option}' in "
f"the config file at {config_path}, option stored as-is, "
"without parsing"
for option in options
]
def falsy(*options):
return [
f"WARNING:odoo.tools.config:option {option} reads 'False' "
f"in the config file at {config_path} but isn't a boolean "
"option, skip"
for option in options
]
self.assertEqual(capture.output,
missing('demo', 'geoip_database', 'osv_memory_age_limit')
+ falsy(
'db_host', 'db_name', 'db_password', 'db_port',
'db_user', 'email_from', 'from_filter', 'log_db',
'smtp_password', 'smtp_ssl_certificate_filename',
'smtp_ssl_private_key_filename', 'smtp_user',
)
+ missing('translate_modules'),
)
self.assertEqual(capture_warn.output, [
'WARNING:odoo.tools.config:missing --http-interface/http_interface, '
'using 0.0.0.0 by default, will change to 127.0.0.1 in 20.0',
])
def test_05_repeat_parse_config(self):
"""Emulate multiple calls to parse_config()"""
config = configmanager()
config._parse_config()
config._warn_deprecated_options()
config._parse_config()
config._warn_deprecated_options()
with self.assertLogs('odoo.tools.config', 'WARNING') as capture:
config = configmanager()
config._parse_config()
config._warn_deprecated_options()
config._parse_config()
config._warn_deprecated_options()
self.assertEqual(capture.output, [MISSING_HTTP_INTERFACE] * 2)
def test_06_cli(self):
config = configmanager(fname=file_path('base/tests/config/empty.conf'))
with file_open('base/tests/config/cli') as file:
config._parse_config(file.read().split())
with self.assertLogs('odoo.tools.config', 'WARNING') as capture:
self.config._parse_config(file.read().split())
self.assertEqual(capture.output, [
"WARNING:odoo.tools.config:test file '/tmp/file-file' cannot be found",
])
values = {
self.assertConfigEqual({
# options not exposed on the command line
'admin_passwd': 'admin',
'bin_path': '',
'csv_internal_sep': ',',
'default_productivity_apps': False,
'proxy_access_token': '',
'publisher_warranty_url': 'http://services.odoo.com/publisher-warranty/',
'reportgz': False,
'root_path': f'{ROOT_PATH}/odoo',
'websocket_rate_limit_burst': 10,
'websocket_rate_limit_delay': .2,
'websocket_keep_alive_timeout': 3600,
# common
'config': None,
'save': None,
'init': {'hr': 1, 'stock': 1},
'update': {'account': 1, 'website': 1},
'without_demo': 'rigolo',
'demo': {},
'config': EMPTY_CONFIG_PATH,
'save': False,
'init': {'hr': True, 'stock': True},
'update': {'account': True, 'website': True},
'reinit': ['account'],
'with_demo': True,
'import_file_maxbytes': 10485760,
'import_file_timeout': 3,
'import_partial': '/tmp/import-partial',
'import_url_regex': '^(?:http|https)://',
'pidfile': '/tmp/pidfile',
'addons_path': f'{ROOT_PATH}/odoo/addons,{ROOT_PATH}/addons',
'upgrade_path': '',
'pre_upgrade_scripts': '',
'server_wide_modules': 'base,mail',
'addons_path': [],
'upgrade_path': [],
'pre_upgrade_scripts': [],
'server_wide_modules': ['web', 'base', 'mail'],
'data_dir': '/tmp/data-dir',
# HTTP
@ -457,9 +527,8 @@ class TestConfigManager(TransactionCase):
'logfile': '/tmp/odoo.log',
'syslog': False,
'log_handler': [
':INFO',
'odoo.tools.config:DEBUG',
':WARNING',
'odoo.tools.config:DEBUG',
'odoo.http:DEBUG',
'odoo.sql_db:DEBUG',
],
@ -479,7 +548,7 @@ class TestConfigManager(TransactionCase):
'smtp_ssl_private_key_filename': '/tmp/tlskey',
# database
'db_name': 'horizon',
'db_name': ['horizon'],
'db_user': 'kiwi',
'db_password': 'Tigrou0073',
'pg_path': '/tmp/pg_path',
@ -491,21 +560,16 @@ class TestConfigManager(TransactionCase):
'db_template': 'backup1706',
'db_replica_host': 'db2.localhost',
'db_replica_port': 2038,
'db_app_name': 'myapp{pid}',
# i18n
'load_language': 'fr_FR',
'language': 'fr_FR',
'translate_out': '/tmp/translate_out.csv',
'translate_in': '/tmp/translate_in.csv',
'overwrite_existing_translations': True,
'translate_modules': ['hr', 'mail', 'stock'],
# security
'list_db': False,
# advanced
'dev_mode': ['xml', 'reload'],
'shell_interface': 'ipython',
'stop_after_init': True,
'osv_memory_count_limit': 71,
'transient_age_limit': 4.0,
@ -514,21 +578,185 @@ class TestConfigManager(TransactionCase):
'unaccent': True,
'geoip_city_db': '/tmp/city.db',
'geoip_country_db': '/tmp/country.db',
}
if IS_POSIX:
# multiprocessing
values.update(
{
'workers': 92,
'limit_memory_soft': 1048576,
'limit_memory_soft_gevent': 1048577,
'limit_memory_hard': 1048578,
'limit_memory_hard_gevent': 1048579,
'limit_time_cpu': 60,
'limit_time_real': 61,
'limit_time_real_cron': 62,
'limit_request': 100,
}
)
self.assertEqual(config.options, values)
'workers': 92,
'limit_memory_soft': 1048576,
'limit_memory_soft_gevent': 1048577,
'limit_memory_hard': 1048578,
'limit_memory_hard_gevent': 1048579,
'limit_time_cpu': 60,
'limit_time_real': 61,
'limit_time_real_cron': 62,
'limit_request': 100,
})
def test_07_environ(self):
with file_open('base/tests/config/environ') as file:
os.environ.update({
x[0]: x[2]
for line in file.readlines()
if (x := line.rstrip('\n').partition('=')) and x[0]
and not line.startswith('#')
})
self.config._parse_config()
self.assertConfigEqual({
# options not exposed on the command line
'admin_passwd': 'admin',
'bin_path': '',
'csv_internal_sep': ',',
'default_productivity_apps': False,
'proxy_access_token': '',
'publisher_warranty_url': 'http://services.odoo.com/publisher-warranty/',
'reportgz': False,
'websocket_rate_limit_burst': 10,
'websocket_rate_limit_delay': .2,
'websocket_keep_alive_timeout': 3600,
# common
'config': EMPTY_CONFIG_PATH,
'save': False,
'init': {},
'update': {},
'reinit': [],
'with_demo': True,
'import_file_maxbytes': 10485760,
'import_file_timeout': 3,
'import_partial': '',
'import_url_regex': '^(?:http|https)://',
'pidfile': '/tmp/pidfile',
'addons_path': [],
'upgrade_path': [],
'pre_upgrade_scripts': [],
'server_wide_modules': ['web', 'base', 'mail'],
'data_dir': '/tmp/data-dir',
# HTTP
'http_interface': '10.0.0.254',
'http_port': 6942,
'gevent_port': 8012,
'http_enable': False,
'proxy_mode': True,
'x_sendfile': True,
# web
'dbfilter': '.*',
# testing
'test_file': '',
'test_enable': False,
'test_tags': None,
'screencasts': '/tmp/screencasts',
'screenshots': '/tmp/screenshots',
# logging
'logfile': '/tmp/odoo.log',
'syslog': False,
'log_handler': [
':WARNING',
'odoo.tools.config:DEBUG',
],
'log_db': 'logdb',
'log_db_level': 'debug',
'log_level': 'debug',
# SMTP
'email_from': 'admin@example.com',
'from_filter': '.*',
'smtp_server': 'smtp.localhost',
'smtp_port': 1299,
'smtp_ssl': True,
'smtp_user': 'spongebob',
'smtp_password': 'Tigrou0072',
'smtp_ssl_certificate_filename': '/tmp/tlscert',
'smtp_ssl_private_key_filename': '/tmp/tlskey',
# database
'db_name': ['horizon'],
'db_user': 'kiwi',
'db_password': 'Tigrou0073',
'pg_path': '/tmp/pg_path',
'db_host': 'db.localhost',
'db_port': 4269,
'db_sslmode': 'verify-full',
'db_maxconn': 42,
'db_maxconn_gevent': 100,
'db_template': 'backup1706',
'db_replica_host': 'db2.localhost',
'db_replica_port': 2038,
'db_app_name': 'envapp',
# i18n (not loaded)
'load_language': None,
'overwrite_existing_translations': False,
# security
'list_db': False,
# advanced
'dev_mode': ['xml', 'reload'],
'stop_after_init': False, # not on env
'osv_memory_count_limit': 71,
'transient_age_limit': 4.0,
'max_cron_threads': 4,
'limit_time_worker_cron': 0,
'unaccent': True,
'geoip_city_db': '/tmp/city.db',
'geoip_country_db': '/tmp/country.db',
'workers': 92,
'limit_memory_soft': 1048576,
'limit_memory_soft_gevent': 1048577,
'limit_memory_hard': 1048578,
'limit_memory_hard_gevent': 1048579,
'limit_time_cpu': 60,
'limit_time_real': 61,
'limit_time_real_cron': 62,
'limit_request': 100,
})
@patch('optparse.OptionParser.error')
def test_06_syslog_logfile_exclusive_cli(self, error):
    """--syslog and --logfile must be rejected together, from the CLI and from a config file."""
    # CLI form
    self.parse_reset(['--syslog', '--logfile', 'logfile'])
    # config-file form
    self.parse_reset(['-c', file_path('base/tests/config/sysloglogfile.conf')])
    # parser.error() is mocked, so both rejections are observed instead of exiting
    error.assert_has_calls(2 * [call("the syslog and logfile options are exclusive")])
@patch('optparse.OptionParser.error')
def test_10_init_update_incompatible_with_multidb(self, error):
    """-i/--init and -u/--update must be rejected when several databases are given."""
    # multi-db from the CLI
    self.parse_reset(['-d', 'db1,db2', '-i', 'base'])
    self.parse_reset(['-d', 'db1,db2', '-u', 'base'])
    # multi-db from a config file
    self.parse_reset(['-c', file_path('base/tests/config/multidb.conf'), '-i', 'base'])
    self.parse_reset(['-c', file_path('base/tests/config/multidb.conf'), '-u', 'base'])
    # all four invocations must hit the same parser error
    error.assert_has_calls(4 * [call("Cannot use -i/--init or -u/--update with multiple databases in the -d/--database/db_name")])
def test_11_auto_stop_after_init_after_test(self):
    """stop_after_init is implied by --stop and by any --test-* option."""
    for args, stop_after_init in [
        ([], False),
        (['--stop'], True),
        (['--test-enable'], True),
        (['--test-tags', 'tag'], True),
        (['--test-file', __file__], True),
    ]:
        with self.subTest(args=args):
            if any('--test' in arg for arg in args):
                # without a database, enabling tests only logs a warning
                with self.assertLogs('odoo.tools.config', 'WARNING') as capture:
                    _, options = self.parse_reset(args)
                self.assertEqual(capture.output, [
                    "WARNING:odoo.tools.config:Empty -d/--database/db_name, tests won't run",
                ])
            else:
                _, options = self.parse_reset(args)
            self.assertEqual(options['stop_after_init'], stop_after_init)
def test_13_empty_db_replica_host(self):
    """An empty --db_replica_host implies the 'replica' dev mode and warns since 19.0."""
    with self.assertLogs('py.warnings', 'WARNING') as capture:
        _, options = self.parse_reset(['--db_replica_host', ''])
        # empty string is normalized to None, dev mode is derived
        self.assertIsNone(options['db_replica_host'])
        self.assertEqual(options['dev_mode'], ['replica'])
    self.assertEqual(len(capture.output), 1)
    self.assertIn('Since 19.0, an empty --db_replica_host', capture.output[0])
    # no deprecation warning when the dev mode is passed explicitly
    with self.assertNoLogs('py.warnings', 'WARNING'):
        _, options = self.parse_reset(['--db_replica_host', '', '--dev', 'replica'])
        self.assertIsNone(options['db_replica_host'])
        self.assertEqual(options['dev_mode'], ['replica'])

View file

@ -1,11 +1,40 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import date
from datetime import date, datetime, time, timedelta, timezone
from functools import partial
from odoo.tests import BaseCase
from odoo.tools.date_utils import get_fiscal_year
import pytz
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from odoo.tests import BaseCase, TransactionCase
from odoo.tools.date_utils import (
add,
date_range,
end_of,
get_fiscal_year,
localized,
parse_date,
parse_iso_date,
start_of,
subtract,
to_timezone,
)
class TestDateUtils(BaseCase):
class TestDateUtils(TransactionCase):
@freeze_time('2024-05-01 14:00:00')
def test_localized_timezone(self):
    """localized() attaches a tzinfo; to_timezone() converts between naive and aware."""
    dt = datetime.now()  # naive, frozen at 2024-05-01 14:00
    self.assertIsNotNone(localized(dt).tzinfo)
    tz = timezone(timedelta(hours=5))
    # an already-aware datetime keeps its own tzinfo after localized()
    self.assertIs(localized(dt.astimezone(tz)).tzinfo, tz)
    dtz = dt.astimezone(tz)
    self.assertEqual(dtz.hour, 19)  # 14:00 shifted into UTC+5
    self.assertIs(dtz.tzinfo, tz)
    # to_timezone(None) strips back to naive; to_timezone(tz) re-localizes
    self.assertEqual(to_timezone(None)(dtz), dt)
    self.assertEqual(to_timezone(tz)(dt), dtz)
def test_fiscal_year(self):
self.assertEqual(get_fiscal_year(date(2024, 12, 31)), (date(2024, 1, 1), date(2024, 12, 31)))
@ -24,3 +53,335 @@ class TestDateUtils(BaseCase):
self.assertEqual(get_fiscal_year(date(2024, 2, 29), 28, 2), (date(2023, 3, 1), date(2024, 2, 29)))
self.assertEqual(get_fiscal_year(date(2023, 2, 28), 28, 2), (date(2022, 3, 1), date(2023, 2, 28)))
self.assertEqual(get_fiscal_year(date(2023, 2, 28), 29, 2), (date(2022, 3, 1), date(2023, 2, 28)))
def test_offset_utils(self):
    """ test date/datetime fields helpers """
    # add/subtract/start_of/end_of are exercised on both a date and a datetime
    d = date(2077, 10, 23)
    dt = datetime(2077, 10, 23, 9, 42)

    # addition
    self.assertEqual(add(d, days=5), date(2077, 10, 28))
    self.assertEqual(add(dt, seconds=10), datetime(2077, 10, 23, 9, 42, 10))

    # subtraction
    self.assertEqual(subtract(d, months=1), date(2077, 9, 23))
    self.assertEqual(subtract(dt, hours=2), datetime(2077, 10, 23, 7, 42, 0))

    # start_of
    # year
    self.assertEqual(start_of(d, 'year'), date(2077, 1, 1))
    self.assertEqual(start_of(dt, 'year'), datetime(2077, 1, 1))
    # quarter
    q1 = date(2077, 1, 1)
    q2 = date(2077, 4, 1)
    q3 = date(2077, 7, 1)
    q4 = date(2077, 10, 1)
    self.assertEqual(start_of(d.replace(month=3), 'quarter'), q1)
    self.assertEqual(start_of(d.replace(month=5), 'quarter'), q2)
    self.assertEqual(start_of(d.replace(month=7), 'quarter'), q3)
    self.assertEqual(start_of(d, 'quarter'), q4)
    self.assertEqual(start_of(dt, 'quarter'), datetime.combine(q4, time.min))
    # month
    self.assertEqual(start_of(d, 'month'), date(2077, 10, 1))
    self.assertEqual(start_of(dt, 'month'), datetime(2077, 10, 1))
    # week
    self.assertEqual(start_of(d, 'week'), date(2077, 10, 18))
    self.assertEqual(start_of(dt, 'week'), datetime(2077, 10, 18))
    # day
    self.assertEqual(start_of(d, 'day'), d)
    self.assertEqual(start_of(dt, 'day'), dt.replace(hour=0, minute=0, second=0))
    # hour
    with self.assertRaises(ValueError):
        start_of(d, 'hour')  # 'hour' granularity is meaningless for a plain date
    self.assertEqual(start_of(dt, 'hour'), dt.replace(minute=0, second=0))
    # invalid
    with self.assertRaises(ValueError):
        start_of(dt, 'poop')

    # end_of
    # year
    self.assertEqual(end_of(d, 'year'), d.replace(month=12, day=31))
    self.assertEqual(end_of(dt, 'year'),
                     datetime.combine(d.replace(month=12, day=31), time.max))
    # quarter
    q1 = date(2077, 3, 31)
    q2 = date(2077, 6, 30)
    q3 = date(2077, 9, 30)
    q4 = date(2077, 12, 31)
    self.assertEqual(end_of(d.replace(month=2), 'quarter'), q1)
    self.assertEqual(end_of(d.replace(month=4), 'quarter'), q2)
    self.assertEqual(end_of(d.replace(month=9), 'quarter'), q3)
    self.assertEqual(end_of(d, 'quarter'), q4)
    self.assertEqual(end_of(dt, 'quarter'), datetime.combine(q4, time.max))
    # month
    self.assertEqual(end_of(d, 'month'), d.replace(day=31))
    self.assertEqual(end_of(dt, 'month'),
                     datetime.combine(date(2077, 10, 31), time.max))
    # week
    self.assertEqual(end_of(d, 'week'), date(2077, 10, 24))
    self.assertEqual(end_of(dt, 'week'),
                     datetime.combine(datetime(2077, 10, 24), time.max))
    # day
    self.assertEqual(end_of(d, 'day'), d)
    self.assertEqual(end_of(dt, 'day'), datetime.combine(dt, time.max))
    # hour
    with self.assertRaises(ValueError):
        end_of(d, 'hour')
    self.assertEqual(end_of(dt, 'hour'),
                     datetime.combine(dt, time.max).replace(hour=dt.hour))
    # invalid
    with self.assertRaises(ValueError):
        end_of(dt, 'crap')
def test_parse_iso_date(self):
    """parse_iso_date accepts plain ISO dates/datetimes and rejects anything else."""
    # valid inputs: date-only and 'date time' forms (no timezone)
    self.assertEqual(parse_iso_date('2024-01-05'), date(2024, 1, 5))
    self.assertEqual(parse_iso_date('2024-01-05 00:30:00'), datetime(2024, 1, 5, 0, 30))
    self.assertEqual(parse_iso_date('2024-01-05 00:00:00'), datetime(2024, 1, 5))
    # invalid inputs: UTC offsets and malformed strings all raise ValueError
    for bad_value in (
        '2024-01-05 00:00:00+02:00',
        '123',
        '2024-14-05',
        '2024-14-05 11',
    ):
        with self.assertRaises(ValueError):
            parse_iso_date(bad_value)
def test_parse_date(self):
    """parse_date accepts the same plain ISO strings when given an environment."""
    env = self.env
    self.assertEqual(parse_date('2024-01-05', env), date(2024, 1, 5))
    self.assertEqual(parse_date('2024-01-05 00:30:00', env), datetime(2024, 1, 5, 0, 30))
    self.assertEqual(parse_date('2024-01-05 00:00:00', env), datetime(2024, 1, 5))
    # explicit UTC offsets are rejected, as in parse_iso_date
    with self.assertRaises(ValueError):
        parse_date('2024-01-05 00:00:00+02:00', env)
@freeze_time('2024-01-05 13:05:00')
def test_parse_date_relative_utc(self):
    """Relative expressions ('='/'+'/'-' offsets, weekday names, week_start) in UTC.

    Frozen 'now' is Friday 2024-01-05 13:05 UTC; later sections mutate the
    user language's week_start, so assertion order matters.
    """
    # force a deterministic first day of week (Monday) for the user's language
    self.env["res.lang"]._lang_get(self.env.user.lang).week_start = "1"
    env = self.env(context={'tz': 'UTC'})
    parse = partial(parse_date, env=env)
    self.assertEqual(parse('=1d'), datetime(2024, 1, 1))
    self.assertEqual(parse('=2000y'), datetime(2000, 1, 5))
    self.assertEqual(parse('+3d'), datetime(2024, 1, 8, 13, 5))
    self.assertEqual(parse('-1m'), datetime(2023, 12, 5, 13, 5))
    self.assertEqual(parse('+3d +1w -1m -2y'), datetime(2021, 12, 15, 13, 5))
    self.assertEqual(parse('-02H -15M'), datetime(2024, 1, 5, 10, 50))
    self.assertEqual(parse('=02H =15M'), datetime(2024, 1, 5, 2, 15))
    self.assertEqual(parse('+02H +15M'), datetime(2024, 1, 5, 15, 20))
    self.assertEqual(parse('=11d +2H +15M'), datetime(2024, 1, 11, 2, 15))
    self.assertEqual(parse('today'), date(2024, 1, 5))
    self.assertEqual(parse('today +1w'), date(2024, 1, 12))
    # 2024-01-05 is Friday
    self.assertEqual(parse('=monday'), datetime(2024, 1, 1))
    self.assertEqual(parse('=sunday'), datetime(2024, 1, 7))
    # next Monday, previous Monday
    self.assertEqual(parse('+monday'), datetime(2024, 1, 8, 13, 5))
    self.assertEqual(parse('-monday'), datetime(2024, 1, 1, 13, 5))
    # next Friday, previous Friday -> same Friday!
    self.assertEqual(parse('+friday'), datetime(2024, 1, 5, 13, 5))
    self.assertEqual(parse('-friday'), datetime(2024, 1, 5, 13, 5))
    # actual next Friday, actual previous Friday
    self.assertEqual(parse('+1d +friday'), datetime(2024, 1, 12, 13, 5))
    self.assertEqual(parse('-1d -friday'), datetime(2023, 12, 29, 13, 5))
    # next Sunday, previous Sunday
    self.assertEqual(parse('+sunday'), datetime(2024, 1, 7, 13, 5))
    self.assertEqual(parse('-sunday'), datetime(2023, 12, 31, 13, 5))
    # week_start = 1 (Monday)
    self.assertEqual(parse('=week_start'), datetime(2024, 1, 1))
    self.assertEqual(parse('+week_start'), datetime(2024, 1, 8, 13, 5))
    self.assertEqual(parse('-week_start'), datetime(2024, 1, 1, 13, 5))
    # week_start = 6 (Saturday)
    self.env["res.lang"]._lang_get(self.env.user.lang).week_start = "6"
    self.assertEqual(parse('=week_start'), datetime(2023, 12, 30))
    self.assertEqual(parse('+week_start'), datetime(2024, 1, 6, 13, 5))
    self.assertEqual(parse('-week_start'), datetime(2023, 12, 30, 13, 5))
    self.assertEqual(parse('=sunday'), datetime(2023, 12, 31))
    # week_start = 5 (Friday)
    self.env["res.lang"]._lang_get(self.env.user.lang).week_start = "5"
    self.assertEqual(parse('=week_start'), datetime(2024, 1, 5))
    self.assertEqual(parse('+week_start'), datetime(2024, 1, 5, 13, 5))
    self.assertEqual(parse('-week_start'), datetime(2024, 1, 5, 13, 5))
    self.assertEqual(parse('=thursday'), datetime(2024, 1, 11))
    self.assertEqual(parse('=friday'), datetime(2024, 1, 5))
@freeze_time('2024-01-05 13:05:00')
def test_parse_date_relative_tz(self):
    """Relative expressions evaluated in a non-UTC user timezone (UTC+1)."""
    env = self.env(context={'tz': 'Etc/GMT-1'})  # Etc/GMT-1 is UTC+1 (POSIX sign inversion)
    parse = partial(parse_date, env=env)
    self.assertEqual(parse('now'), datetime(2024, 1, 5, 13, 5))
    self.assertEqual(parse('=5H'), datetime(2024, 1, 5, 4))
    self.assertEqual(parse('-55M'), datetime(2024, 1, 5, 12, 10))
    self.assertEqual(parse('today'), date(2024, 1, 5))
    # 23:05 UTC is already the next day in the user's UTC+1 timezone
    with freeze_time('2024-01-04 23:05:00'):
        self.assertEqual(parse('today'), date(2024, 1, 5))
class TestDateRangeFunction(BaseCase):
    """ Test on date_range generator. """

    def test_date_range_with_naive_datetimes(self):
        """ Check date_range with naive datetimes. """
        begin = datetime(1985, 1, 1)
        until = datetime(1986, 1, 1)
        # one entry per month boundary, inclusive of both ends
        wanted = [datetime(1985, month, 1) for month in range(1, 13)]
        wanted.append(datetime(1986, 1, 1))
        self.assertEqual(list(date_range(begin, until)), wanted)

    def test_date_range_with_date(self):
        """ Check date_range with naive datetimes. """
        begin = date(1985, 1, 1)
        until = date(1986, 1, 1)
        wanted = [date(1985, month, 1) for month in range(1, 13)]
        wanted.append(date(1986, 1, 1))
        self.assertEqual(list(date_range(begin, until)), wanted)

    def test_date_range_with_timezone_aware_datetimes_other_than_utc(self):
        """ Check date_range with timezone-aware datetimes other than UTC."""
        tz = pytz.timezone('Europe/Brussels')
        begin = tz.localize(datetime(1985, 1, 1))
        until = tz.localize(datetime(1986, 1, 1))
        naive = [datetime(1985, month, 1) for month in range(1, 13)]
        naive.append(datetime(1986, 1, 1))
        # every generated value carries the same localized zone as the bounds
        expected = [tz.localize(value) for value in naive]
        self.assertEqual(expected, list(date_range(begin, until)))

    def test_date_range_with_mismatching_zones(self):
        """ Check date_range with mismatching zone should raise an exception."""
        begin = pytz.timezone('Europe/Brussels').localize(datetime(1985, 1, 1))
        until = pytz.timezone('America/Recife').localize(datetime(1986, 1, 1))
        with self.assertRaises(ValueError):
            list(date_range(begin, until))

    def test_date_range_with_inconsistent_datetimes(self):
        """ Check date_range with a timezone-aware datetime and a naive one."""
        begin = datetime(1985, 1, 1)  # naive
        until = pytz.timezone('Europe/Brussels').localize(datetime(1986, 1, 1))
        with self.assertRaises(ValueError):
            list(date_range(begin, until))

    def test_date_range_with_hour(self):
        """ Test date range with hour and naive datetime."""
        begin = datetime(2018, 3, 25)
        until = datetime(2018, 3, 26)
        step = relativedelta(hours=1)
        # 25 values: every hour of the 25th plus midnight of the 26th
        wanted = [datetime(2018, 3, 25) + timedelta(hours=count) for count in range(25)]
        self.assertEqual(list(date_range(begin, until, step)), wanted)

    def test_step_is_positive(self):
        begin = datetime(2018, 3, 25)
        until = datetime(2018, 3, 26)
        # a zero or negative step is rejected
        with self.assertRaises(ValueError):
            list(date_range(begin, until, relativedelta()))
        with self.assertRaises(ValueError):
            list(date_range(begin, until, relativedelta(hours=-1)))

View file

@ -8,11 +8,12 @@ from unittest.mock import patch
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_REPEATABLE_READ
import odoo
from odoo import api
from odoo.modules.registry import Registry
from odoo.sql_db import db_connect, TestCursor
from odoo.sql_db import db_connect
from odoo.tests import common
from odoo.tests.common import BaseCase, HttpCase
from odoo.tests.test_cursor import TestCursor
from odoo.tools.misc import config
ADMIN_USER_ID = common.ADMIN_USER_ID
@ -127,12 +128,11 @@ class TestTestCursor(common.TransactionCase):
def setUp(self):
super().setUp()
# make the registry in test mode
self.registry.enter_test_mode(self.cr)
self.addCleanup(self.registry.leave_test_mode)
self.registry_enter_test_mode()
# now we make a test cursor for self.cr
self.cr = self.registry.cursor()
self.addCleanup(self.cr.close)
self.env = odoo.api.Environment(self.cr, odoo.SUPERUSER_ID, {})
self.env = api.Environment(self.cr, api.SUPERUSER_ID, {})
self.record = self.env['res.partner'].create({'name': 'Foo'})
def write(self, record, value):
@ -218,7 +218,10 @@ class TestTestCursor(common.TransactionCase):
RELEASE SAVEPOINT B -- "savepoint b does not exist"
"""
a = self.registry.cursor()
_b = self.registry.cursor()
b = self.registry.cursor()
# This forces the savepoint to be created
a._check_savepoint()
b._check_savepoint()
# `a` should warn that it found un-closed cursor `b` when trying to close itself
with self.assertLogs('odoo.sql_db', level=logging.WARNING) as cm:
a.close()
@ -226,7 +229,9 @@ class TestTestCursor(common.TransactionCase):
self.assertIn('WARNING:odoo.sql_db:Found different un-closed cursor', msg)
# avoid a warning on teardown (when self.cr finds a still on the stack)
# as well as ensure the stack matches our expectations
self.assertEqual(a._cursors_stack.pop(), a)
with self.assertRaises(psycopg2.errors.InvalidSavepointSpecification):
with self.assertLogs('odoo.sql_db', level=logging.WARNING) as cm:
b.close()
def test_borrow_connection(self):
"""Tests the behavior of the postgresql connection pool recycling/borrowing"""
@ -310,8 +315,7 @@ class TestCursorHooks(common.TransactionCase):
self.assertEqual(self.log, ['preR', 'postR'])
def test_hooks_on_testcursor(self):
self.registry.enter_test_mode(self.cr)
self.addCleanup(self.registry.leave_test_mode)
self.registry_enter_test_mode()
cr = self.registry.cursor()

View file

@ -1,24 +1,41 @@
import contextlib
from lxml import etree
from odoo.exceptions import UserError
from odoo.tests import Form
from odoo.tests.common import TransactionCase, tagged
IGNORE_MODEL_NAMES = {
IGNORE_MODEL_NAMES_DISPLAY_NAME = {
'ir.attachment',
'test_new_api.attachment',
'test_orm.attachment',
'payment.link.wizard',
'account.multicurrency.revaluation.wizard',
'account_followup.manual_reminder',
'product.fetch.image.wizard',
}
IGNORE_MODEL_NAMES_NEW_FORM = {
'account.report.line', # only used as wizard, and display_name isn't compute in a wizard but Form add display_name automatically
'chatbot.script.step', # only used as wizard
'stock.warehouse', # avoid warning "Creating a new warehouse will automatically activate the Storage Locations setting"
'website.visitor', # Visitors can only be created through the frontend.
'marketing.activity', # only used as wizard and always used form marketing.campaign
'crm.stage', # Avoid warning "Changing the value of 'Is Won Stage' may induce ..."
}
IGNORE_COMPUTED_FIELDS = {
'account.payment.register.payment_token_id', # must be computed within a specific environment
}
@tagged('-at_install', 'post_install')
class TestEveryModel(TransactionCase):
def test_display_name_new_record(self):
for model_name in self.registry:
model = self.env[model_name]
if model._abstract or not model._auto or model_name in IGNORE_MODEL_NAMES:
if model._abstract or not model._auto or model_name in IGNORE_MODEL_NAMES_DISPLAY_NAME:
continue
with self.subTest(
@ -32,3 +49,57 @@ class TestEveryModel(TransactionCase):
fields_spec = dict.fromkeys(fields_used + ['display_name'], {})
with contextlib.suppress(UserError):
model.onchange({}, [], fields_spec)
def test_form_new_record(self):
    """Opening a Form (first onchange) on every creatable model with a default
    form view must not raise (UserError is tolerated)."""
    allowed_models = set(self.env['ir.model.access']._get_allowed_models('create'))
    allowed_models -= IGNORE_MODEL_NAMES_NEW_FORM
    for model_name, model in self.env.items():
        # only concrete, persistent, creatable models are checked
        if (
            model._abstract
            or model._transient
            or not model._auto
            or model_name not in allowed_models
        ):
            continue
        default_form_id = self.env['ir.ui.view'].default_view(model_name, 'form')
        if not default_form_id:
            continue
        default_form = self.env['ir.ui.view'].browse(default_form_id)
        if not default_form.arch:
            continue
        view_elem = etree.fromstring(default_form.arch)
        # skip views that explicitly forbid record creation
        if view_elem.get('create') in ('0', 'false'):
            continue
        with self.subTest(
            msg="Create a new record from form view doesn't work (first onchange call).",
            model=model_name,
        ), contextlib.suppress(UserError):
            # Test to open the Form view to check first onchange
            Form(model)
def test_computed_fields_without_dependencies(self):
    """Computed fields with no declared dependencies must still compute on a
    new (unsaved) record when the field appears in some form view."""
    for model in self.env.values():
        if model._abstract or not model._auto:
            continue
        for field in model._fields.values():
            if str(field) in IGNORE_COMPUTED_FIELDS:
                continue
            # only computed fields with an empty dependency list are at risk
            if not field.compute or self.registry.field_depends[field]:
                continue
            # ignore if the field does not appear in a form view
            domain = [
                ('model', '=', model._name),
                ('type', '=', 'form'),
                ('arch_db', 'like', field.name),
            ]
            if not self.env['ir.ui.view'].search_count(domain, limit=1):
                continue
            with self.subTest(msg=f"Compute method of {field} should work on new record."):
                with self.env.cr.savepoint():
                    model.new()[field.name]  # reading triggers the compute on an empty record

View file

@ -18,8 +18,8 @@ class TestFormCreate(TransactionCase):
# By default, it's the `group` `group_account_readonly` which is required to see it, in the `account` module
# But once `account_accountant` gets installed, it becomes `account.group_account_user`
# https://github.com/odoo/enterprise/commit/68f6c1f9fd3ff6762c98e1a405ade035129efce0
self.env.user.groups_id += self.env.ref('account.group_account_readonly')
self.env.user.groups_id += self.env.ref('account.group_account_user')
self.env.user.group_ids += self.env.ref('account.group_account_readonly')
self.env.user.group_ids += self.env.ref('account.group_account_user')
partner_form = Form(self.env['res.partner'])
partner_form.name = 'a partner'
# YTI: Clean that brol

View file

@ -60,3 +60,23 @@ class TestPartnerFormatAddress(FormatAddressCase):
def test_address_view(self):
self.env.company.country_id = self.env.ref('base.us')
self.assertAddressView('res.partner')
def test_display_name_address_formatting(self):
    """display_name with show_address includes the address without blank lines
    left over from empty address parts (here: street2)."""
    france = self.env.ref('base.fr')
    partner = self.env['res.partner'].create({
        'name': 'John Doe',
        'street': '123 Main Street',
        'street2': '',
        'city': 'Paris',
        'country_id': france.id,
    })
    # Default display_name without context
    self.assertIn('John Doe', partner.display_name)
    # display_name with show_address context
    display_name = partner.with_context(show_address=True).display_name
    self.assertIn('123 Main Street', display_name)
    self.assertIn('Paris', display_name)
    # empty street2 must not leave a double newline in the formatted address
    self.assertNotIn('\n\n', display_name)

View file

@ -531,7 +531,7 @@ class TestGroupsOdoo(common.TransactionCase):
self.assertEqual(str(parse('base.group_user') & parse('base.group_system')), "'base.group_system'")
self.assertEqual(str(parse('base.group_system') & parse('base.group_user')), "'base.group_system'")
self.assertEqual(str(parse('base.group_erp_manager') & parse('base.group_system')), "'base.group_system'")
self.assertEqual(str(parse('base.group_system') & parse('base.group_allow_export')), "'base.group_system' & 'base.group_allow_export'")
self.assertEqual(str(parse('base.group_system') & parse('base.group_multi_currency')), "'base.group_system' & 'base.group_multi_currency'")
self.assertEqual(str(parse('base.group_user') | parse('base.group_user')), "'base.group_user'")
self.assertEqual(str(parse('base.group_user') | parse('base.group_system')), "'base.group_user'")
self.assertEqual(str(parse('base.group_system') | parse('base.group_public')), "'base.group_system' | 'base.group_public'")
@ -562,7 +562,7 @@ class TestGroupsOdoo(common.TransactionCase):
self.assertEqual(str(parse('!base.group_user') & parse('base.group_portal,base.group_user')), "'base.group_portal'")
self.assertEqual(str(parse('base.group_user') & parse('base.group_portal,!base.group_user')), "~*")
self.assertEqual(str(parse('!base.group_user') & parse('base.group_portal,!base.group_system')), "'base.group_portal'")
self.assertEqual(str(parse('!base.group_user,base.group_allow_export') & parse('base.group_allow_export,!base.group_system')), "~'base.group_user' & 'base.group_allow_export'")
self.assertEqual(str(parse('!base.group_user,base.group_multi_currency') & parse('base.group_multi_currency,!base.group_system')), "~'base.group_user' & 'base.group_multi_currency'")
self.assertEqual(str(parse('!base.group_user,base.group_portal') & parse('base.group_portal,!base.group_system')), "'base.group_portal'")
self.assertEqual(str(parse('!*') & parse('base.group_portal')), "~*")
self.assertEqual(str(parse('*') & parse('base.group_portal')), "'base.group_portal'")
@ -575,7 +575,7 @@ class TestGroupsOdoo(common.TransactionCase):
self.assertEqual(str(parse('base.group_user') & parse('base.group_portal,!base.group_system')), "~*")
self.assertEqual(str(parse('base.group_user,base.group_system') & parse('base.group_system,base.group_portal')), "'base.group_system'")
self.assertEqual(str(parse('base.group_user') & parse('base.group_system,base.group_portal')), "'base.group_system'")
self.assertEqual(str(parse('base.group_user,base.group_system') & parse('base.group_allow_export')), "'base.group_user' & 'base.group_allow_export'")
self.assertEqual(str(parse('base.group_user,base.group_system') & parse('base.group_multi_currency')), "'base.group_user' & 'base.group_multi_currency'")
self.assertEqual(str(parse('base.group_user,base.group_erp_manager') | parse('base.group_system')), "'base.group_user'")
self.assertEqual(str(parse('base.group_user') | parse('base.group_portal,base.group_system')), "'base.group_user' | 'base.group_portal'")
self.assertEqual(str(parse('!*') | parse('base.group_user')), "'base.group_user'")
@ -596,12 +596,12 @@ class TestGroupsOdoo(common.TransactionCase):
self.assertEqual(parse('base.group_system,base.group_public') <= parse('base.group_system,base.group_public'), True)
self.assertEqual(parse('base.group_system,base.group_public') <= parse('base.group_user,base.group_public'), True)
self.assertEqual(parse('base.group_system,!base.group_public') <= parse('base.group_system'), True)
self.assertEqual(parse('base.group_system,!base.group_allow_export') <= parse('base.group_system'), True)
self.assertEqual(parse('base.group_system') <= parse('base.group_system,!base.group_allow_export'), False)
self.assertEqual(parse('base.group_system,!base.group_multi_currency') <= parse('base.group_system'), True)
self.assertEqual(parse('base.group_system') <= parse('base.group_system,!base.group_multi_currency'), False)
self.assertEqual(parse('base.group_system') <= parse('base.group_system,!base.group_public'), True)
self.assertEqual(parse('base.group_system') == parse('base.group_system,!base.group_public'), True)
self.assertEqual(parse('!base.group_public,!base.group_portal') <= parse('!base.group_public'), True)
self.assertEqual(parse('base.group_user,!base.group_allow_export') <= parse('base.group_user,!base.group_system,!base.group_allow_export'), False)
self.assertEqual(parse('base.group_user,!base.group_multi_currency') <= parse('base.group_user,!base.group_system,!base.group_multi_currency'), False)
self.assertEqual(parse('base.group_system,!base.group_portal,!base.group_public') <= parse('base.group_system,!base.group_public'), True)
def test_groups_3_from_ref(self):
@ -612,31 +612,31 @@ class TestGroupsOdoo(common.TransactionCase):
self.assertEqual(str(parse('base.group_user & base.group_portal | base.group_user & ~base.group_system') & parse('~base.group_user & base.group_portal')), "~*")
self.assertEqual(str(parse('base.group_user & base.group_portal | base.group_user & base.group_system') & parse('base.group_user & ~base.group_portal')), "'base.group_system'")
self.assertEqual(str(parse('base.group_public & base.group_erp_manager | base.group_public & base.group_portal') & parse('*')), "~*")
self.assertEqual(str(parse('base.group_system & base.group_allow_export') & parse('base.group_portal | base.group_system')), "'base.group_system' & 'base.group_allow_export'")
self.assertEqual(str(parse('base.group_system & base.group_multi_currency') & parse('base.group_portal | base.group_system')), "'base.group_system' & 'base.group_multi_currency'")
self.assertEqual(str(parse('base.group_portal & base.group_erp_manager') | parse('base.group_erp_manager')), "'base.group_erp_manager'")
self.assertEqual(parse('base.group_system & base.group_allow_export') < parse('base.group_system'), True)
self.assertEqual(parse('base.group_system & base.group_multi_currency') < parse('base.group_system'), True)
self.assertEqual(parse('base.base_test_group') == parse('base.base_test_group & base.group_user'), True)
self.assertEqual(parse('base.group_system | base.base_test_group') == parse('base.group_system & base.group_user | base.base_test_group & base.group_user'), True)
self.assertEqual(parse('base.group_public & base.group_allow_export') <= parse('base.group_public'), True)
self.assertEqual(parse('base.group_public') <= parse('base.group_public & base.group_allow_export'), False)
self.assertEqual(parse('base.group_public & base.group_multi_currency') <= parse('base.group_public'), True)
self.assertEqual(parse('base.group_public') <= parse('base.group_public & base.group_multi_currency'), False)
self.assertEqual(parse('base.group_public & base.group_user') <= parse('base.group_portal'), True)
self.assertEqual(parse('base.group_public & base.group_user') <= parse('base.group_public | base.group_user'), True)
self.assertEqual(parse('base.group_public & base.group_system') <= parse('base.group_user'), True)
self.assertEqual(parse('base.group_public & base.group_system') <= parse('base.group_portal | base.group_user'), True)
self.assertEqual(parse('base.group_public & base.group_allow_export') <= parse('~base.group_public'), False)
self.assertEqual(parse('base.group_public & base.group_multi_currency') <= parse('~base.group_public'), False)
self.assertEqual(parse('base.group_portal & base.group_public | base.group_system & base.group_public') <= parse('base.group_public'), True)
self.assertEqual(parse('base.group_portal & base.group_user | base.group_system & base.group_user') <= parse('base.group_user'), True)
self.assertEqual(parse('base.group_portal & base.group_system | base.group_user & base.group_system') <= parse('base.group_system'), True)
self.assertEqual(parse('base.group_portal & base.group_user | base.group_user & base.group_user') <= parse('base.group_user'), True)
self.assertEqual(parse('base.group_portal & base.group_user | base.group_user & base.group_user') <= parse('base.group_user'), True)
self.assertEqual(parse('base.group_public') <= parse('base.group_portal & base.group_public | base.group_system & base.group_public'), False)
self.assertEqual(parse('base.group_user & base.group_allow_export') <= parse('base.group_user & base.group_system & base.group_allow_export'), False)
self.assertEqual(parse('base.group_system & base.group_allow_export') <= parse('base.group_user & base.group_system & base.group_allow_export'), True)
self.assertEqual(parse('base.group_system & base.group_allow_export') <= parse('base.group_system'), True)
self.assertEqual(parse('base.group_user & base.group_multi_currency') <= parse('base.group_user & base.group_system & base.group_multi_currency'), False)
self.assertEqual(parse('base.group_system & base.group_multi_currency') <= parse('base.group_user & base.group_system & base.group_multi_currency'), True)
self.assertEqual(parse('base.group_system & base.group_multi_currency') <= parse('base.group_system'), True)
self.assertEqual(parse('base.group_public') >= parse('base.group_portal & base.group_public | base.group_system & base.group_public'), True)
self.assertEqual(parse('base.group_user & base.group_public') >= parse('base.group_user & base.group_portal & base.group_public | base.group_user & base.group_system & base.group_public'), True)
self.assertEqual(parse('base.group_system & base.group_allow_export') >= parse('base.group_system'), False)
self.assertEqual(parse('base.group_system & base.group_allow_export') > parse('base.group_system'), False)
self.assertEqual(parse('base.group_system & base.group_multi_currency') >= parse('base.group_system'), False)
self.assertEqual(parse('base.group_system & base.group_multi_currency') > parse('base.group_system'), False)
def test_groups_4_full_empty(self):
user_group_ids = self.env.user._get_group_ids()
@ -656,7 +656,7 @@ class TestGroupsOdoo(common.TransactionCase):
tests = [
# group on the user, # groups access, access
('base.group_public', 'base.group_system | base.group_public', True),
('base.group_public,base.group_allow_export', 'base.group_user | base.group_public', True),
('base.group_public,base.group_multi_currency', 'base.group_user | base.group_public', True),
('base.group_public', 'base.group_system & base.group_public', False),
('base.group_public', 'base.group_system | base.group_portal', False),
('base.group_public', 'base.group_system & base.group_portal', False),
@ -674,15 +674,15 @@ class TestGroupsOdoo(common.TransactionCase):
('base.group_portal', 'base.group_portal & ~base.group_user', True),
('base.group_system', '~base.group_system & base.group_user', False),
('base.group_system', '~base.group_system & ~base.group_user', False),
('base.group_user', 'base.group_user & base.group_sanitize_override & base.group_allow_export', False),
('base.group_system', 'base.group_user & base.group_sanitize_override & base.group_allow_export', False),
('base.group_system,base.group_allow_export', 'base.group_user & base.group_sanitize_override & base.group_allow_export', True),
('base.group_user,base.group_sanitize_override,base.group_allow_export', 'base.group_user & base.group_sanitize_override & base.group_allow_export', True),
('base.group_user', 'base.group_user & base.group_sanitize_override & base.group_multi_currency', False),
('base.group_system', 'base.group_user & base.group_sanitize_override & base.group_multi_currency', False),
('base.group_system,base.group_multi_currency', 'base.group_user & base.group_sanitize_override & base.group_multi_currency', True),
('base.group_user,base.group_sanitize_override,base.group_multi_currency', 'base.group_user & base.group_sanitize_override & base.group_multi_currency', True),
('base.group_user', 'base.group_erp_manager | base.group_multi_company', False),
('base.group_user,base.group_erp_manager', 'base.group_erp_manager | base.group_multi_company', True),
]
for user_groups, groups, result in tests:
user.groups_id = [(6, 0, [self.env.ref(xmlid).id for xmlid in user_groups.split(',')])]
user.group_ids = [(6, 0, [self.env.ref(xmlid).id for xmlid in user_groups.split(',')])]
self.assertEqual(self.parse_repr(groups).matches(user._get_group_ids()), result, f'User ({user_groups!r}) should {"" if result else "not "}have access to groups: ({groups!r})')
def test_groups_6_distinct(self):
@ -690,9 +690,22 @@ class TestGroupsOdoo(common.TransactionCase):
'name': 'A User',
'login': 'a_user',
'email': 'a@user.com',
'groups_id': self.env.ref('base.group_user').ids,
'group_ids': self.env.ref('base.group_user').ids,
})
with self.assertRaisesRegex(ValidationError, "The user cannot have more than one user types."):
user.groups_id = [(4, self.env.ref('base.group_public').id)]
with self.assertRaisesRegex(ValidationError, "The user cannot have more than one user types."):
user.groups_id = [(4, self.env.ref('base.group_portal').id)]
# update res.users groups with distinct groups
with self.assertRaises(ValidationError, msg="The user cannot have more than one user types."):
user.group_ids = [(4, self.env.ref('base.group_public').id)]
with self.assertRaises(ValidationError, msg="The user cannot have more than one user types."):
user.group_ids = [(4, self.env.ref('base.group_portal').id)]
user.group_ids = self.env.ref('base.group_user') + self.test_group
# update res.group implied_ids having the effect that users have distinct groups
with self.assertRaises(ValidationError, msg="The user cannot have more than one user types."):
self.test_group.implied_ids += self.env.ref('base.group_public')
with self.assertRaises(ValidationError, msg="The user cannot have more than one user types."):
self.env.ref('base.group_public').implied_by_ids = self.test_group
# this works because public user is inactive
self.env.ref('base.group_public').implied_ids += self.test_group

View file

@ -1,12 +1,13 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import requests
import threading
from unittest.mock import patch
from odoo.http import Controller, request, route
from odoo.tests.common import ChromeBrowser, HttpCase, tagged
from odoo.http import route, Controller, request
from odoo.tests.common import HttpCase, tagged, ChromeBrowser, TEST_CURSOR_COOKIE_NAME, Like
from odoo.tools import config
from unittest.mock import patch
_logger = logging.getLogger(__name__)
@ -84,20 +85,21 @@ class TestChromeBrowser(HttpCase):
self.addCleanup(self.browser.stop)
def test_screencasts(self):
self.browser.start_screencast()
self.browser.screencaster.start()
self.browser.navigate_to('about:blank')
self.browser._wait_ready()
code = "setTimeout(() => console.log('test successful'), 2000); setInterval(() => document.body.innerText = (new Date()).getTime(), 100);"
self.browser._wait_code_ok(code, 10)
self.browser._save_screencast()
self.browser.screencaster.save()
@tagged('-at_install', 'post_install')
class TestChromeBrowserOddDimensions(TestChromeBrowser):
allow_inherited_tests_method = True
browser_size = "1215x768"
class TestRequestRemaining(HttpCase):
class TestRequestRemainingCommon(HttpCase):
# This test case tries to reproduce the case where a request is lost between two tests and is executed during the second one.
#
# - Test A browser js finishes with a pending request
@ -116,36 +118,126 @@ class TestRequestRemaining(HttpCase):
cls.main_lock = threading.Lock()
cls.main_lock.acquire()
def test_requests_a(self):
class Dummycontroller(Controller):
@route('/web/concurrent', type='http', auth='public', sitemap=False)
def wait(c, **params):
self.assertEqual(request.env.cr.__class__.__name__, 'TestCursor')
assert request.env.cr.__class__.__name__ == 'TestCursor'
request.env.cr.execute('SELECT 1')
request.env.cr.fetchall()
# note that the previous queries are not really needed since the http stack will check the registry
# but this makes the test more clear and robust
_logger.info('B finish')
self.env.registry.clear_cache('routing')
self.addCleanup(self.env.registry.clear_cache, 'routing')
cls.env.registry.clear_cache('routing')
cls.addClassCleanup(cls.env.registry.clear_cache, 'routing')
def _test_requests_a(self, cookie=False):
def late_request_thread():
# In some rare case the request may arrive after _wait_remaining_requests.
# this thread is trying to reproduce this case.
_logger.info('Waiting for B to start')
if self.main_lock.acquire(timeout=10):
self.url_open("/web/concurrent", timeout=10)
_logger.info('Opening url')
# don't use url_open since it simulates a lost request from chrome and url_open would wait to acquire the lock
s = requests.Session()
if cookie:
s.cookies.set(TEST_CURSOR_COOKIE_NAME, self.canonical_tag)
s.get(self.base_url() + "/web/concurrent", timeout=10)
else:
_logger.error('Something went wrong and thread was not able to aquire lock')
TestRequestRemaining.thread_a = threading.Thread(target=late_request_thread)
type(self).thread_a = threading.Thread(target=late_request_thread)
self.thread_a.start()
def test_requests_b(self):
def _test_requests_b(self):
self.env.cr.execute('SELECT 1')
with self.assertLogs('odoo.tests.common') as lc:
self.main_lock.release()
_logger.info('B started, waiting for A to finish')
self.thread_a.join()
self.assertEqual(lc.output[0].split(':', 1)[1], 'odoo.tests.common:Request with path /web/concurrent has been ignored during test as it it does not contain the test_cursor cookie or it is expired. (required "/base/tests/test_http_case.py:TestRequestRemaining.test_requests_b", got "/base/tests/test_http_case.py:TestRequestRemaining.test_requests_a")')
self.main_lock.release()
_logger.info('B started, waiting for A to finish')
self.thread_a.join()
self.env.cr.fetchall()
class TestRequestRemainingNoCookie(TestRequestRemainingCommon):
    """Late request *without* the test-cursor cookie: it must be ignored.

    The ghost request sent by test_requests_a carries no cookie at all, so
    during test_requests_b the http stack logs the required/got keys as
    ``None`` and refuses to open a test cursor for it.
    """

    def test_requests_a(self):
        self._test_requests_a()

    def test_requests_b(self):
        # The late request from test_requests_a lands while this test runs;
        # it must be rejected and only leave a warning in the logs.
        with self.assertLogs('odoo.tests.common') as log_catcher:
            self._test_requests_b()
        self.assertEqual(
            log_catcher.output,
            [Like('... odoo.tests.common:Request with path /web/concurrent has been ignored during test as it it does not contain the test_cursor cookie or it is expired. '
                  '(required "None (request are not enabled)", got "None")')],
        )
class TestRequestRemainingNotEnabled(TestRequestRemainingCommon):
    """Late request *with* a valid cookie but while requests are not enabled.

    The cookie identifies test_requests_a, yet test_requests_b never enables
    requests, so the required key is logged as ``None (request are not
    enabled)`` and the request is ignored.
    """

    def test_requests_a(self):
        self._test_requests_a(cookie=True)

    def test_requests_b(self):
        with self.assertLogs('odoo.tests.common') as log_catcher:
            self._test_requests_b()
        self.assertEqual(
            log_catcher.output,
            [Like('... odoo.tests.common:Request with path /web/concurrent has been ignored during test as it it does not contain the test_cursor cookie or it is expired. '
                  '(required "None (request are not enabled)", got "/base/tests/test_http_case.py:TestRequestRemainingNotEnabled.test_requests_a")')],
        )
class TestRequestRemainingStartDuringNext(TestRequestRemainingCommon):
    """Late request arriving while the *next* test has requests enabled.

    Requests are allowed during test_requests_b (``allow_requests``), but the
    late request still carries test_requests_a's cookie, so the cookie/test
    mismatch is detected and the request is ignored.
    """

    def test_requests_a(self):
        self._test_requests_a(cookie=True)

    def test_requests_b(self):
        # allow_requests() makes this test accept its own requests; the stale
        # one from test_requests_a must still be rejected.
        with self.assertLogs('odoo.tests.common') as log_catcher, self.allow_requests():
            self._test_requests_b()
        self.assertEqual(
            log_catcher.output,
            [Like('... odoo.tests.common:Request with path /web/concurrent has been ignored during test as it it does not contain the test_cursor cookie or it is expired. '
                  '(required "/base/tests/test_http_case.py:TestRequestRemainingStartDuringNext.test_requests_b__0", got "/base/tests/test_http_case.py:TestRequestRemainingStartDuringNext.test_requests_a")')],
        )
class TestRequestRemainingAfterFirstCheck(TestRequestRemainingCommon):
    """
    This test is more specific to the current implementation and checks what
    happens if the lock is acquired after the next thread.
    Scenario:
    - test_requests_a closes browser js, acquires the lock
    - a ghost request tries to open a test cursor, makes the first check (assertCanOpenTestCursor)
    - the next test enables requests (here using url_open) releasing the lock
    - the pending request is executed but detects the test change
    """

    def test_requests_a(self, cookie=False):
        # Remember this test's tag so the ghost request can present it later.
        self.http_request_key = self.canonical_tag

        def late_request_thread():
            _logger.info('Opening url')
            # don't use url_open since it simulates a lost request from chrome
            # and url_open would wait to acquire the lock
            s = requests.Session()
            s.cookies.set(TEST_CURSOR_COOKIE_NAME, self.http_request_key)
            # we expect the request to be stuck when acquiring the registry lock
            s.get(self.base_url() + "/web/concurrent", timeout=10)

        type(self).thread_a = threading.Thread(target=late_request_thread)
        self.thread_a.start()
        # we need to ensure that the first check is made and that we are acquiring the lock
        self.main_lock.acquire()

    def assertCanOpenTestCursor(self):
        super().assertCanOpenTestCursor()
        # The first time assertCanOpenTestCursor runs we release the lock
        # (presumably the locks ensure we are still inside test_requests_a —
        # TODO confirm the intent of the original "lowks" comment).
        if self.main_lock:
            self.main_lock.release()
            self.main_lock = None

    def test_requests_b(self):
        _logger.info('B started, waiting for A to finish')
        # url_open will simulate an enabled request
        with self.assertLogs('odoo.tests.common') as log_catcher, self.allow_requests():
            self.thread_a.join()
        self.assertEqual(
            log_catcher.output,
            [Like('... Trying to open a test cursor for /base/tests/test_http_case.py:TestRequestRemainingAfterFirstCheck.test_requests_a while already in a test /base/tests/test_http_case.py:TestRequestRemainingAfterFirstCheck.test_requests_b')],
        )

View file

@ -0,0 +1,61 @@
import unittest
from odoo.tests import TransactionCase, can_import, loaded_demo_data, tagged
from odoo.tools.misc import file_open
@tagged("post_install", "-at_install")
class TestImportFiles(TransactionCase):
    """Import the contact XLSX template shipped with `base` and check that it
    parses and imports without any error message."""

    @unittest.skipUnless(
        can_import("xlrd.xlsx") or can_import("openpyxl"), "XLRD/XLSX not available",
    )
    def test_import_contacts_template_xls(self):
        if not loaded_demo_data(self.env):
            self.skipTest('Needs demo data to be able to import those files')
        model = "res.partner"
        filename = "contacts_import_template.xlsx"
        # Interpolate the template name so the test extends naturally to
        # other bundled templates (was a garbled literal path).
        file_content = file_open(f"base/static/xls/{filename}", "rb").read()
        import_wizard = self.env["base_import.import"].create(
            {
                "res_model": model,
                "file": file_content,
                "file_type": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            },
        )
        # Preview must succeed: no parse error, and every column of the
        # template must match a field of the target model.
        result = import_wizard.parse_preview(
            {
                "has_headers": True,
            },
        )
        self.assertIsNone(result.get("error"))
        field_names = ['/'.join(v) for v in result["matches"].values()]
        results = import_wizard.execute_import(
            field_names,
            [r.lower() for r in result["headers"]],
            {
                "import_skip_records": [],
                "import_set_empty_fields": [],
                "fallback_values": {},
                "name_create_enabled_fields": {},
                "encoding": "",
                "separator": "",
                "quoting": '"',
                "date_format": "",
                "datetime_format": "",
                "float_thousand_separator": ",",
                "float_decimal_separator": ".",
                "advanced": True,
                "has_headers": True,
                "keep_matches": False,
                "limit": 2000,
                "skip": 0,
                "tracking_disable": True,
            },
        )
        # "messages" collects import warnings/errors; include the file name in
        # the failure message (the original message was truncated).
        self.assertFalse(
            results["messages"],
            f"results should be empty on successful import of {filename}",
        )

View file

@ -0,0 +1,59 @@
import logging
import subprocess
import sys
import time
from pathlib import Path
from odoo.tests import BaseCase
_logger = logging.getLogger(__name__)
class TestInit(BaseCase):
    """Check that every ``odoo.*`` sub-module can be imported standalone.

    Each module is imported in a fresh Python subprocess, so import-order
    dependencies and import-time side effects (such as forcing the process
    timezone to UTC) are observable.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Repository root (4 levels above this file), used as PYTHONPATH for
        # the spawned interpreters.
        cls.python_path = Path(__file__).parents[4].resolve()

    def run_python(self, code, check=True, capture_output=True, text=True, timeout=10, env=None, **kwargs):
        """Run *code* in a fresh interpreter; return the CompletedProcess.

        Newlines are folded into ``;`` so the snippet fits a single ``-c``
        argument — only flat statement sequences are supported.
        """
        code = code.replace('\n', '; ')
        # Deliberately minimal environment: only the caller-provided
        # variables plus the PYTHONPATH needed to import the checkout.
        env = {
            **(env or {}),
            "PYTHONPATH": str(self.python_path),
        }
        return subprocess.run(
            [sys.executable, '-c', code],
            capture_output=capture_output,
            check=check,
            env=env,
            text=text,
            timeout=timeout,
            **kwargs
        )

    def odoo_modules_to_test(self):
        """Yield the dotted names of ``odoo`` and ``odoo.cli`` sub-modules."""
        import odoo.cli  # noqa: PLC0415
        for path in (*odoo.__path__, *odoo.cli.__path__):
            parent = Path(path)
            for module in parent.iterdir():
                # Skip dunder entries such as __init__.py and __pycache__.
                if (module.is_dir() or module.suffix == '.py') and '__' not in module.name:
                    if parent.name == 'odoo':
                        yield f"odoo.{module.stem}"
                    else:
                        yield f"odoo.{parent.name}.{module.stem}"

    def test_import(self):
        """Test that importing a sub-module in any order works."""
        # Modules expected to force the process timezone to UTC on import.
        EXPECT_UTC = ('init', 'cli', 'http', 'modules', 'service', 'api', 'fields', 'models', 'orm', 'tests')
        for module in sorted(self.odoo_modules_to_test()):
            set_timezone = any(expect in module for expect in EXPECT_UTC)
            env = {'TZ': 'CET'}
            timezone = 'UTC' if set_timezone else 'CET'
            # The subprocess exits 5 when the observed timezone is unexpected.
            code = f"import {module}; import sys, time; sys.exit(0 if (time.tzname[0] == '{timezone}') else 5)"
            with self.subTest(module=module, timezone=timezone):
                start_time = time.perf_counter()
                # NOTE(review): the CompletedProcess is discarded and
                # check=False, so the exit status is never asserted here —
                # confirm whether a returncode assertion is intended.
                self.run_python(code, env=env, check=False)
                end_time = time.perf_counter()
                _logger.info(" %s execution time: %.3fs", module, end_time - start_time)

View file

@ -0,0 +1,47 @@
from unittest import SkipTest
from odoo.tests.common import standalone
from odoo.tests.test_module_operations import install
from odoo.tools import mute_logger
from odoo.tools.convert import ParseError
@standalone('test_isolated_install')
def test_isolated_install(env):
    """ This test checks that a module failing to install has no side effect on
    other modules. In particular, the module that was installed just before is
    correctly marked as 'installed'.
    """
    MODULE_NAMES = ['test_install_base', 'test_install_auto', 'test_install_fail']
    modules = {
        module.name: module
        for module in env['ir.module.module'].search([('name', 'in', MODULE_NAMES)])
    }
    # Precondition: all three helper modules exist and are uninstalled.
    if len(modules) < 3:
        raise SkipTest(f"Failed to find the required modules {MODULE_NAMES}")
    if not all(module.state == 'uninstalled' for module in modules.values()):
        raise SkipTest(f"The modules {MODULE_NAMES} should not be installed")

    # now install test_install_fail, which should install test_install_base and
    # test_install_auto just before it
    try:
        with mute_logger('odoo.modules.registry'):
            install(env.cr.dbname, modules['test_install_fail'].id, 'test_install_fail')
    except ParseError:
        # Expected: test_install_fail ships a view with an invalid xpath.
        pass

    # make sure to reset the transaction
    env.cr.rollback()
    env.transaction.reset()

    # check the presence of the cron (test_install_auto's data survived)
    cron = env['ir.cron'].search([('cron_name', '=', 'test_install_auto_cron')])
    assert cron, "The cron 'test_install_auto_cron' has not been created"

    # check the states of the modules
    assert modules['test_install_base'].state == 'installed', "Module 'test_install_base' not installed"
    assert modules['test_install_auto'].state == 'installed', "Module 'test_install_auto' not installed"
    assert modules['test_install_fail'].state == 'uninstalled', "Module 'test_install_fail' should be uninstalled"

    # check that test_install_auto's code is present
    assert env['res.currency']._test_install_auto_cron() is True, "Cron code not working"

View file

@ -0,0 +1,14 @@
# Manifest of the helper module for the isolated-install test: auto-installs
# on top of test_install_base and loads an ir.cron record from data/.
{
    'name': 'test_install_auto',
    'version': '1.0',
    'category': 'Hidden/Tools',
    'description': "",
    'depends': ['test_install_base'],
    'data': [
        'data/ir_cron.xml',
    ],
    'installable': True,
    'auto_install': True,
    'author': 'Odoo S.A.',
    'license': 'LGPL-3',
}

View file

@ -0,0 +1,15 @@
<?xml version="1.0" encoding="utf-8"?>
<odoo>
    <data>
        <!-- Cron whose presence after installation proves that
             test_install_auto's data was loaded; its code calls the method
             added by this module's res.currency override. -->
        <record id="test_install_auto_cron" model="ir.cron">
            <field name="name">test_install_auto_cron</field>
            <field name="model_id" ref="base.model_res_currency"/>
            <field name="state">code</field>
            <field name="code">model._test_install_auto_cron()</field>
            <field name="active" eval="True"/>
            <field name="user_id" ref="base.user_root"/>
            <field name="interval_number">12</field>
            <field name="interval_type">hours</field>
        </record>
    </data>
</odoo>

View file

@ -0,0 +1,11 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
class ResCurrency(models.Model):
    _inherit = 'res.currency'

    @api.model
    def _test_install_auto_cron(self):
        """Marker method invoked by the test_install_auto cron.

        Returning ``True`` lets the isolated-install test verify that this
        module's Python code was loaded along with its data.
        """
        return True

View file

@ -0,0 +1,10 @@
# Manifest of the minimal base module for the isolated-install test; it only
# exists so test_install_auto and test_install_fail can depend on it.
{
    'name': 'test_install_base',
    'version': '1.0',
    'category': 'Hidden/Tools',
    'description': "",
    'depends': ['base'],
    'installable': True,
    'author': 'Odoo S.A.',
    'license': 'LGPL-3',
}

View file

@ -0,0 +1,13 @@
# Manifest of the deliberately broken module for the isolated-install test:
# its view data fails to load, so its installation must be rolled back without
# affecting the modules installed just before it.
{
    'name': 'test_install_fail',
    'version': '1.0',
    'category': 'Hidden/Tools',
    'description': "",
    'depends': ['test_install_base'],
    'data': [
        'views/res_currency_views.xml',
    ],
    'installable': True,
    'author': 'Odoo S.A.',
    'license': 'LGPL-3',
}

View file

@ -0,0 +1,13 @@
<?xml version='1.0' encoding='utf-8'?>
<odoo>
    <!-- Intentionally broken view extension: the xpath targets a field named
         'ERROR' that does not exist on res.currency, so loading this data
         raises and makes the installation of test_install_fail fail. -->
    <record id="view_currency_form_inherit_bad_module" model="ir.ui.view">
        <field name="name">view.currency.form.inherit.bad.module</field>
        <field name="model">res.currency</field>
        <field name="inherit_id" ref="base.view_currency_form"/>
        <field name="arch" type="xml">
            <xpath expr="//field[@name='ERROR']" position="after">
                <h3>to fail</h3>
            </xpath>
        </field>
    </record>
</odoo>

View file

@ -0,0 +1,19 @@
### test_isolated_install
Given the following package structure (where `\_` denotes a dependency)
<pre>
...
\_ test_install_base
\_ test_install_auto
\_ test_install_fail
</pre>
Given:
* The auto-install module test_install_auto
* Has a model override of ResCurrency that defines a new method for the class
* Has some data that defines an IrCron record, which executes the method added in the override
* The data error module test_install_fail:
* Has a view extension which has some bad syntax that will trigger a failure in load_data()
We want to ensure each module is installed in an isolated manner, i.e., the
failed installation of a module should not affect the state of another module
which has been installed just before.

View file

@ -0,0 +1,206 @@
from datetime import datetime
from odoo.tests.common import TransactionCase
from odoo.tools.intervals import Intervals, intervals_overlap, invert_intervals
class TestIntervals(TransactionCase):
    """Union, intersection and difference of ``Intervals`` (default mode,
    where adjacent intervals are merged)."""

    def ints(self, pairs):
        # Decorate each (start, stop) pair with an empty recordset so it has
        # the (start, stop, records) shape that Intervals expects.
        empty = self.env['base']
        return [(start, stop, empty) for start, stop in pairs]

    def test_union(self):
        # (input pairs, expected normalized pairs)
        cases = [
            ([(1, 2), (3, 4)], [(1, 2), (3, 4)]),
            ([(1, 2), (2, 4)], [(1, 4)]),
            ([(1, 3), (2, 4)], [(1, 4)]),
            ([(1, 4), (2, 3)], [(1, 4)]),
            ([(3, 4), (1, 2)], [(1, 2), (3, 4)]),
            ([(2, 4), (1, 2)], [(1, 4)]),
            ([(2, 4), (1, 3)], [(1, 4)]),
            ([(2, 3), (1, 4)], [(1, 4)]),
        ]
        for given, expected in cases:
            self.assertEqual(list(Intervals(self.ints(given))), self.ints(expected))

    def test_intersection(self):
        # (left pairs, right pairs, expected pairs)
        cases = [
            ([(10, 20)], [(5, 8)], []),
            ([(10, 20)], [(5, 10)], []),
            ([(10, 20)], [(5, 15)], [(10, 15)]),
            ([(10, 20)], [(5, 20)], [(10, 20)]),
            ([(10, 20)], [(5, 25)], [(10, 20)]),
            ([(10, 20)], [(10, 15)], [(10, 15)]),
            ([(10, 20)], [(10, 20)], [(10, 20)]),
            ([(10, 20)], [(10, 25)], [(10, 20)]),
            ([(10, 20)], [(15, 18)], [(15, 18)]),
            ([(10, 20)], [(15, 20)], [(15, 20)]),
            ([(10, 20)], [(15, 25)], [(15, 20)]),
            ([(10, 20)], [(20, 25)], []),
            (
                [(0, 5), (10, 15), (20, 25), (30, 35)],
                [(6, 7), (9, 12), (13, 17), (22, 23), (24, 40)],
                [(10, 12), (13, 15), (22, 23), (24, 25), (30, 35)],
            ),
        ]
        for left, right, expected in cases:
            result = Intervals(self.ints(left)) & Intervals(self.ints(right))
            self.assertEqual(list(result), self.ints(expected))

    def test_difference(self):
        # (left pairs, right pairs, expected pairs)
        cases = [
            ([(10, 20)], [(5, 8)], [(10, 20)]),
            ([(10, 20)], [(5, 10)], [(10, 20)]),
            ([(10, 20)], [(5, 15)], [(15, 20)]),
            ([(10, 20)], [(5, 20)], []),
            ([(10, 20)], [(5, 25)], []),
            ([(10, 20)], [(10, 15)], [(15, 20)]),
            ([(10, 20)], [(10, 20)], []),
            ([(10, 20)], [(10, 25)], []),
            ([(10, 20)], [(15, 18)], [(10, 15), (18, 20)]),
            ([(10, 20)], [(15, 20)], [(10, 15)]),
            ([(10, 20)], [(15, 25)], [(10, 15)]),
            ([(10, 20)], [(20, 25)], [(10, 20)]),
            (
                [(0, 5), (10, 15), (20, 25), (30, 35)],
                [(6, 7), (9, 12), (13, 17), (22, 23), (24, 40)],
                [(0, 5), (12, 13), (20, 22), (23, 24)],
            ),
        ]
        for left, right, expected in cases:
            result = Intervals(self.ints(left)) - Intervals(self.ints(right))
            self.assertEqual(list(result), self.ints(expected))
class TestUtils(TransactionCase):
    """Tests for the stand-alone helpers ``intervals_overlap`` and
    ``invert_intervals``."""

    def test_intervals_intersections(self):
        # (interval_a, interval_b, expected overlap) — per the first case,
        # intervals that merely touch at an endpoint do not overlap.
        test_data = [
            ((datetime(2023, 2, 14), datetime(2023, 2, 15)),
             (datetime(2023, 2, 15), datetime(2023, 2, 16)), False),
            ((datetime(2023, 2, 14), datetime(2023, 2, 15)),
             (datetime(2023, 2, 13), datetime(2023, 2, 16)), True),
            ((datetime(2023, 2, 13), datetime(2023, 2, 16)),
             (datetime(2023, 2, 14), datetime(2023, 2, 15)), True),
            ((datetime(2023, 2, 13), datetime(2023, 2, 16)),
             (datetime(2023, 2, 15), datetime(2023, 2, 17)), True),
        ]
        for interval_a, interval_b, overlaps in test_data:
            with self.subTest(interval_a=interval_a, interval_b=interval_b):
                self.assertEqual(intervals_overlap(
                    interval_a, interval_b), overlaps)

    def test_intervals_inversion(self):
        # Deliberately messy input: unordered, overlapping, adjacent and
        # zero-length intervals must all be normalized by invert_intervals.
        test_intervals = [
            (datetime(2023, 2, 5), datetime(2023, 2, 6)),  # no adjacent
            (datetime(2023, 2, 7), datetime(2023, 2, 7)),  # 0-length
            (datetime(2023, 2, 9), datetime(2023, 2, 10)),  # multi-adjacent
            (datetime(2023, 2, 10), datetime(2023, 2, 11)),
            (datetime(2023, 2, 11), datetime(2023, 2, 12)),
            (datetime(2023, 2, 13), datetime(2023, 2, 15)),  # overlapping
            (datetime(2023, 2, 14), datetime(2023, 2, 18)),
            (datetime(2023, 2, 15), datetime(2023, 2, 16)),  # contained inside the previous
            (datetime(2023, 2, 25), datetime(2023, 3, 10)),  # unordered non-adjacent
            (datetime(2023, 2, 20), datetime(2023, 2, 22)),
        ]
        # Each limits pair below maps 1:1 to the expected result at the same
        # index in test_results.
        test_limits = [
            (datetime(2023, 1, 1), datetime(2023, 4, 1)),  # all-encompassing
            (datetime(2023, 2, 5), datetime(2023, 3, 10)),  # exact fit original intervals
            (datetime(2023, 2, 9), datetime(2023, 2, 12)),  # exact fit of one interval
            (datetime(2023, 2, 6), datetime(2023, 2, 9)),  # exact fit of one inverted interval
            (datetime(2023, 2, 8), datetime(2023, 2, 11)),  # overlapping some
        ]
        test_results = [
            [
                (datetime(2023, 1, 1), datetime(2023, 2, 5)),
                (datetime(2023, 2, 6), datetime(2023, 2, 9)),
                (datetime(2023, 2, 12), datetime(2023, 2, 13)),
                (datetime(2023, 2, 18), datetime(2023, 2, 20)),
                (datetime(2023, 2, 22), datetime(2023, 2, 25)),
                (datetime(2023, 3, 10), datetime(2023, 4, 1)),
            ],
            [
                (datetime(2023, 2, 6), datetime(2023, 2, 9)),
                (datetime(2023, 2, 12), datetime(2023, 2, 13)),
                (datetime(2023, 2, 18), datetime(2023, 2, 20)),
                (datetime(2023, 2, 22), datetime(2023, 2, 25)),
            ],
            [],
            [
                (datetime(2023, 2, 6), datetime(2023, 2, 9)),
            ],
            [
                (datetime(2023, 2, 8), datetime(2023, 2, 9)),
            ],
        ]
        for limits, expected_result in zip(test_limits, test_results):
            start, end = limits
            with self.subTest(start=start, end=end):
                self.assertListEqual(invert_intervals(test_intervals, start, end), expected_result)
class TestKeepDistinctIntervals(TransactionCase):
    """Same operations as ``TestIntervals`` but with ``keep_distinct=True``,
    where merely-adjacent intervals are kept separate instead of merged."""

    def ints(self, pairs):
        # Decorate each (start, stop) pair with an empty recordset.
        empty = self.env['base']
        return [(start, stop, empty) for start, stop in pairs]

    def test_union(self):
        # (input pairs, expected pairs) — adjacent intervals stay distinct.
        cases = [
            ([(1, 2), (3, 4)], [(1, 2), (3, 4)]),
            ([(1, 2), (2, 4)], [(1, 2), (2, 4)]),
            ([(1, 3), (2, 4)], [(1, 4)]),
            ([(1, 4), (2, 3)], [(1, 4)]),
            ([(1, 4), (1, 4)], [(1, 4)]),
            ([(3, 4), (1, 2)], [(1, 2), (3, 4)]),
            ([(2, 4), (1, 2)], [(1, 2), (2, 4)]),
            ([(2, 4), (1, 3)], [(1, 4)]),
            ([(2, 3), (1, 4)], [(1, 4)]),
        ]
        for given, expected in cases:
            self.assertEqual(
                list(Intervals(self.ints(given), keep_distinct=True)),
                self.ints(expected),
            )

    def test_intersection(self):
        # (left pairs, right pairs, expected pairs)
        cases = [
            ([(10, 20)], [(5, 8)], []),
            ([(10, 20)], [(5, 10)], []),
            ([(10, 20)], [(5, 15)], [(10, 15)]),
            ([(10, 20)], [(5, 20)], [(10, 20)]),
            ([(10, 20)], [(5, 25)], [(10, 20)]),
            ([(10, 20)], [(10, 15)], [(10, 15)]),
            ([(10, 20)], [(10, 20)], [(10, 20)]),
            ([(10, 20)], [(10, 25)], [(10, 20)]),
            ([(10, 20)], [(15, 18)], [(15, 18)]),
            ([(10, 20)], [(15, 20)], [(15, 20)]),
            ([(10, 20)], [(15, 25)], [(15, 20)]),
            ([(10, 20)], [(20, 25)], []),
            (
                [(0, 5), (10, 15), (20, 25), (30, 35)],
                [(6, 7), (9, 12), (13, 17), (22, 23), (24, 40)],
                [(10, 12), (13, 15), (22, 23), (24, 25), (30, 35)],
            ),
        ]
        for left, right, expected in cases:
            result = Intervals(self.ints(left), keep_distinct=True) & Intervals(self.ints(right), keep_distinct=True)
            self.assertEqual(list(result), self.ints(expected))

    def test_difference(self):
        # (left pairs, right pairs, expected pairs)
        cases = [
            ([(10, 20)], [(5, 8)], [(10, 20)]),
            ([(10, 20)], [(5, 10)], [(10, 20)]),
            ([(10, 20)], [(5, 15)], [(15, 20)]),
            ([(10, 20)], [(5, 20)], []),
            ([(10, 20)], [(5, 25)], []),
            ([(10, 20)], [(10, 15)], [(15, 20)]),
            ([(10, 20)], [(10, 20)], []),
            ([(10, 20)], [(10, 25)], []),
            ([(10, 20)], [(15, 18)], [(10, 15), (18, 20)]),
            ([(10, 20)], [(15, 20)], [(10, 15)]),
            ([(10, 20)], [(15, 25)], [(10, 15)]),
            ([(10, 20)], [(20, 25)], [(10, 20)]),
            (
                [(0, 5), (10, 15), (20, 25), (30, 35)],
                [(6, 7), (9, 12), (13, 17), (22, 23), (24, 40)],
                [(0, 5), (12, 13), (20, 22), (23, 24)],
            ),
        ]
        for left, right, expected in cases:
            result = Intervals(self.ints(left), keep_distinct=True) - Intervals(self.ints(right), keep_distinct=True)
            self.assertEqual(list(result), self.ints(expected))

View file

@ -3,6 +3,7 @@
from datetime import date
import json
from markupsafe import Markup
from psycopg2 import IntegrityError, ProgrammingError
import requests
from unittest.mock import patch
@ -189,6 +190,59 @@ ZeroDivisionError: division by zero""" % self.test_server_action.id
self.assertEqual(len(category), 1, 'ir_actions_server: TODO')
self.assertIn(category, self.test_partner.category_id)
def test_25_crud_copy(self):
    """Server action state 'object_copy': running it duplicates the record."""
    self.action.write({
        'state': 'object_copy',
        'crud_model_id': self.res_partner_model.id,
        'resource_ref': self.test_partner,
    })
    # Before running, only the original partner matches its name.
    partner = self.env['res.partner'].search([('name', 'ilike', self.test_partner.name)])
    self.assertEqual(len(partner), 1)
    run_res = self.action.with_context(self.context).run()
    self.assertFalse(run_res, 'ir_actions_server: duplicate record action correctly finished should return False')
    # The copy's name derives from the original, so ilike now matches both.
    partner = self.env['res.partner'].search([('name', 'ilike', self.test_partner.name)])
    self.assertEqual(len(partner), 2)
def test_25_crud_copy_link_many2one(self):
self.action.write({
'state': 'object_copy',
'crud_model_id': self.res_partner_model.id,
'resource_ref': self.test_partner,
'link_field_id': self.res_partner_parent_field.id,
})
run_res = self.action.with_context(self.context).run()
self.assertFalse(run_res, 'ir_actions_server: duplicate record action correctly finished should return False')
dupe = self.test_partner.search([('name', 'ilike', self.test_partner.name), ('id', '!=', self.test_partner.id)])
self.assertEqual(len(dupe), 1)
self.assertEqual(self.test_partner.parent_id, dupe)
def test_25_crud_copy_link_one2many(self):
self.action.write({
'state': 'object_copy',
'crud_model_id': self.res_partner_model.id,
'resource_ref': self.test_partner,
'link_field_id': self.res_partner_children_field.id,
})
run_res = self.action.with_context(self.context).run()
self.assertFalse(run_res, 'ir_actions_server: duplicate record action correctly finished should return False')
dupe = self.test_partner.search([('name', 'ilike', self.test_partner.name), ('id', '!=', self.test_partner.id)])
self.assertEqual(len(dupe), 1)
self.assertIn(dupe, self.test_partner.child_ids)
def test_25_crud_copy_link_many2many(self):
category_id = self.env['res.partner.category'].name_create("CategoryToDuplicate")[0]
self.action.write({
'state': 'object_copy',
'crud_model_id': self.res_partner_category_model.id,
'link_field_id': self.res_partner_category_field.id,
'resource_ref': f'res.partner.category,{category_id}',
})
run_res = self.action.with_context(self.context).run()
self.assertFalse(run_res, 'ir_actions_server: duplicate record action correctly finished should return False')
dupe = self.env['res.partner.category'].search([('name', 'ilike', 'CategoryToDuplicate'), ('id', '!=', category_id)])
self.assertEqual(len(dupe), 1)
self.assertIn(dupe, self.test_partner.category_id)
def test_30_crud_write(self):
# Do: update partner name
self.action.write({
@ -203,6 +257,20 @@ ZeroDivisionError: division by zero""" % self.test_server_action.id
self.assertEqual(len(partner), 1, 'ir_actions_server: TODO')
self.assertEqual(partner.city, 'OrigCity', 'ir_actions_server: TODO')
def test_31_crud_write_html(self):
    """Server action 'object_write' with an HTML value: the html_value field
    is stored as Markup and written to the target record's html field."""
    self.assertEqual(self.action.value, False)
    self.action.write({
        'state': 'object_write',
        'update_path': 'comment',
        'html_value': '<p>MyComment</p>',
    })
    # The html_value is sanitized/wrapped into a Markup on write.
    self.assertEqual(self.action.html_value, Markup('<p>MyComment</p>'))
    # Test run
    self.assertEqual(self.test_partner.comment, False)
    run_res = self.action.with_context(self.context).run()
    self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
    self.assertEqual(self.test_partner.comment, Markup('<p>MyComment</p>'))
def test_object_write_equation(self):
# Do: update partners city
self.action.write({
@ -420,7 +488,7 @@ ZeroDivisionError: division by zero""" % self.test_server_action.id
self.action.write({
'model_id': self.res_country_model.id,
'binding_model_id': self.res_country_model.id,
'groups_id': [Command.link(group0.id)],
'group_ids': [Command.link(group0.id)],
'code': 'record.write({"vat_label": "VatFromTest"})',
})
@ -433,7 +501,7 @@ ZeroDivisionError: division by zero""" % self.test_server_action.id
self.assertFalse(self.test_country.vat_label)
# add group to the user, and test again
self.env.user.write({'groups_id': [Command.link(group0.id)]})
self.env.user.write({'group_ids': [Command.link(group0.id)]})
bindings = Actions.get_bindings('res.country')
self.assertItemsEqual(bindings.get('action'), self.action.read(['name', 'sequence', 'binding_view_types']))
@ -524,9 +592,11 @@ ZeroDivisionError: division by zero""" % self.test_server_action.id
with patch.object(requests, 'post', _patched_post), mute_logger('odoo.addons.base.models.ir_actions'):
# first run: 200
self.action.with_context(self.context).run()
self.env.cr.postcommit.run() # webhooks run in postcommit
# second run: 400, should *not* raise but
# should warn in logs (hence mute_logger)
self.action.with_context(self.context).run()
self.env.cr.postcommit.run() # webhooks run in postcommit
self.assertEqual(num_requests, 2)
def test_90_convert_to_float(self):
@ -748,7 +818,7 @@ class TestCustomFields(TestCommonCustomFields):
#
# Add a custom field equivalent to the following definition:
#
# class Partner(models.Model)
# class ResPartner(models.Model)
# _inherit = 'res.partner'
# x_oh_boy = fields.Char(related="country_id.code", store=True)
#
@ -765,7 +835,7 @@ class TestCustomFields(TestCommonCustomFields):
# create a non-computed field, and assert how many queries it takes
model_id = self.env['ir.model']._get_id('res.partner')
query_count = 49
query_count = 50
with self.assertQueryCount(query_count):
self.env.registry.clear_cache()
self.env['ir.model.fields'].create({
@ -878,9 +948,9 @@ class TestCustomFieldsPostInstall(TestCommonCustomFields):
# as a user could do through a SQL shell or a `cr.execute` in a server action
self.env.cr.execute("ALTER TABLE ir_model_fields DROP CONSTRAINT ir_model_fields_name_manual_field")
self.env.cr.execute("UPDATE ir_model_fields SET name = 'foo' WHERE id = %s", [field.id])
with self.assertLogs('odoo.addons.base.models.ir_model') as log_catcher:
with self.assertLogs('odoo.registry') as log_catcher:
# Trick to reload the registry. The above rename done through SQL didn't reload the registry. This will.
self.env.registry.setup_models(self.cr)
self.env.registry._setup_models__(self.cr, [self.MODEL])
self.assertIn(
f'The field `{field.name}` is not defined in the `{field.model}` Python class', log_catcher.output[0]
)

View file

@ -4,12 +4,14 @@ import base64
import hashlib
import io
import os
import contextlib
from unittest.mock import patch
from PIL import Image
import odoo
from odoo.exceptions import AccessError
from odoo.api import SUPERUSER_ID
from odoo.exceptions import AccessError, ValidationError
from odoo.addons.base.models.ir_attachment import IrAttachment
from odoo.addons.base.tests.common import TransactionCaseWithUserDemo
from odoo.tools import mute_logger
from odoo.tools.image import image_to_base64
@ -248,14 +250,13 @@ class TestIrAttachment(TransactionCaseWithUserDemo):
self.assertFalse(os.path.isfile(store_path), 'file removed')
def test_13_rollback(self):
savepoint = self.cr.savepoint()
# the data needs to be unique so that no other attachment link
# the file so that the gc removes it
unique_blob = os.urandom(16)
a1 = self.env['ir.attachment'].create({'name': 'a1', 'raw': unique_blob})
store_path = os.path.join(self.filestore, a1.store_fname)
self.assertTrue(os.path.isfile(store_path), 'file exists')
savepoint.rollback()
with contextlib.closing(self.cr.savepoint()):
a1 = self.env['ir.attachment'].create({'name': 'a1', 'raw': unique_blob})
store_path = os.path.join(self.filestore, a1.store_fname)
self.assertTrue(os.path.isfile(store_path), 'file exists')
self.env['ir.attachment']._gc_file_store_unsafe()
self.assertFalse(os.path.isfile(store_path), 'file removed')
@ -333,14 +334,14 @@ class TestPermissions(TransactionCaseWithUserDemo):
# Check the user can access his own attachment
attachment_user.datas
# Create an attachment as superuser without res_model/res_id
attachment_admin = self.Attachments.with_user(odoo.SUPERUSER_ID).create({'name': 'foo'})
attachment_admin = self.Attachments.with_user(SUPERUSER_ID).create({'name': 'foo'})
# Check the record cannot be accessed by a regular user
with self.assertRaises(AccessError):
attachment_admin.with_user(self.env.user).datas
# Check the record can be accessed by an admin (other than superuser)
admin_user = self.env.ref('base.user_admin')
# Safety assert that base.user_admin is not the superuser, otherwise the test is useless
self.assertNotEqual(odoo.SUPERUSER_ID, admin_user.id)
self.assertNotEqual(SUPERUSER_ID, admin_user.id)
attachment_admin.with_user(admin_user).datas
@mute_logger("odoo.addons.base.models.ir_rule", "odoo.models")
@ -358,6 +359,35 @@ class TestPermissions(TransactionCaseWithUserDemo):
('res_field', '=', 'image_128')
])
self.assertTrue(attachment.datas)
with self.assertQueries([
# security SQL contains public check or accessible field with
# res_id IN accessible corecords for a given res_model
"""
SELECT "ir_attachment"."id"
FROM "ir_attachment"
WHERE ("ir_attachment"."res_field" IN %s AND "ir_attachment"."res_id" IN %s AND "ir_attachment"."res_model" IN %s AND (
"ir_attachment"."public" IS TRUE
OR (
("ir_attachment"."res_field" IN %s OR "ir_attachment"."res_field" IS NULL)
AND "ir_attachment"."res_id" IN (
SELECT "res_partner"."id"
FROM "res_partner"
WHERE "res_partner"."id" IN %s AND (
("res_partner"."company_id" IN %s OR "res_partner"."company_id" IS NULL)
OR "res_partner"."partner_share" IS NOT TRUE
)
)
AND "ir_attachment"."res_model" IN %s
)
))
ORDER BY "ir_attachment"."id" DESC
"""
]):
self.env['ir.attachment'].search([
('res_model', '=', 'res.partner'),
('res_id', '=', main_partner.id),
('res_field', '=', 'image_128')
])
# Patch the field `res.partner.image_128` to make it unreadable by the demo user
self.patch(self.env.registry['res.partner']._fields['image_128'], 'groups', 'base.group_system')
@ -419,3 +449,27 @@ class TestPermissions(TransactionCaseWithUserDemo):
# even from a record with write permissions
with self.assertRaises(AccessError):
copied.copy({'res_model': unwritable._name, 'res_id': unwritable.id})
def test_write_error(self):
# try to write a file in a place where we have no access
# /proc is not writeable, check if we have an error raised
self.patch(IrAttachment, '_get_path', lambda self, binary, _checksum: (binary, '/proc/dummy_test'))
with self.assertRaises(OSError):
self.env['ir.attachment']._file_write(b'test', 'test')
def test_write_create_url_binary_attachment(self):
with self.assertRaisesRegex(ValidationError, r"Sorry, you are not allowed to write on this document"):
self.Attachments.create({'name': 'Py', 'url': '/blabla.js', 'raw': b'Something'})
with self.assertRaisesRegex(ValidationError, r"Sorry, you are not allowed to write on this document"):
self.Attachments.create({'name': 'Py', 'url': '/blabla.js', 'raw': b'Something'})
with self.assertRaisesRegex(ValidationError, r"Sorry, you are not allowed to write on this document"):
self.Attachments.with_context(default_url='/blabla.js').create({'name': 'Py', 'raw': b'Something'})
existing_attachment = self.Attachments.create({'name': 'aaa'})
with self.assertRaisesRegex(ValidationError, r"Sorry, you are not allowed to write on this document"):
existing_attachment.url = '/blabla.js'
existing_attachment.type = 'url'
existing_attachment.url = '/blabla.js'
with self.assertRaisesRegex(ValidationError, r"Sorry, you are not allowed to write on this document"):
existing_attachment.type = 'binary'

View file

@ -1,19 +1,28 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# ruff: noqa: E201, E272, E301, E306
# ruff: noqa: E201, E241, E272, E301, E306
import contextlib
import secrets
import textwrap
import time
from contextlib import closing
from datetime import timedelta
from unittest.mock import patch
from freezegun import freeze_time
from odoo import fields
from odoo.tests.common import TransactionCase, RecordCapturer
from odoo.tests.common import RecordCapturer, TransactionCase
from odoo.tools import mute_logger
from odoo.addons.base.models.ir_cron import MIN_FAILURE_COUNT_BEFORE_DEACTIVATION, MIN_DELTA_BEFORE_DEACTIVATION
from odoo.addons.base.models.ir_cron import (
MIN_DELTA_BEFORE_DEACTIVATION,
MIN_FAILURE_COUNT_BEFORE_DEACTIVATION,
MIN_TIME_PER_JOB,
CompletionStatus,
IrCron,
)
class CronMixinCase:
@ -84,6 +93,10 @@ class TestIrCron(TransactionCase, CronMixinCase):
self.env['ir.cron.trigger'].search(domain).unlink()
self.env['ir.cron.progress'].search(domain).unlink()
# this ensures that cr.now() returns the frozen datetime, which is
# useful for knowing remaining jobs after "some time"
self.patch(self.env.cr, 'now', self.frozen_datetime)
def test_cron_direct_trigger(self):
self.cron.code = textwrap.dedent(f"""\
model.search(
@ -93,7 +106,10 @@ class TestIrCron(TransactionCase, CronMixinCase):
)
""")
self.cron.method_direct_trigger()
registry = self.cron.pool
with self.enter_registry_test_mode(), patch.object(registry, 'cursor', side_effect=registry.cursor, autospec=True) as cursor_method:
self.cron.method_direct_trigger()
self.assertEqual(cursor_method.call_count, 1, "Should create a new transaction for direct trigger")
self.assertEqual(self.cron.lastcall, fields.Datetime.now())
self.assertEqual(self.partner.name, 'You have been CRONWNED')
@ -197,6 +213,7 @@ class TestIrCron(TransactionCase, CronMixinCase):
default_progress_values = {'done': 0, 'remaining': 0, 'timed_out_counter': 0}
ten_days_ago = fields.Datetime.now() - MIN_DELTA_BEFORE_DEACTIVATION - timedelta(days=2)
almost_failed = MIN_FAILURE_COUNT_BEFORE_DEACTIVATION - 1
frozen_datetime = self.frozen_datetime
def nothing(cron):
state = {'call_count': 0}
@ -208,9 +225,10 @@ class TestIrCron(TransactionCase, CronMixinCase):
state = {'call_count': 0}
CALL_TARGET = 11
def f(self):
frozen_datetime.tick(delta=timedelta(seconds=1))
state['call_count'] += 1
self.env['ir.cron']._notify_progress(
done=1,
self.env['ir.cron']._commit_progress(
processed=1,
remaining=CALL_TARGET - state['call_count']
)
return f, state
@ -220,12 +238,25 @@ class TestIrCron(TransactionCase, CronMixinCase):
CALL_TARGET = 5
def f(self):
state['call_count'] += 1
self.env['ir.cron']._notify_progress(
done=1,
self.env['ir.cron']._commit_progress(
processed=1,
remaining=CALL_TARGET - state['call_count']
)
return f, state
def end_time(cron):
state = {
'call_count': 0,
'remaining': MIN_TIME_PER_JOB + 1,
}
def f(self):
state['call_count'] += 1
while self.env['ir.cron']._commit_progress(remaining=state['remaining']):
state['remaining'] -= 1
frozen_datetime.tick(delta=timedelta(seconds=1))
self.env['ir.cron']._commit_progress(1)
return f, state
def failure(cron):
state = {'call_count': 0}
def f(self):
@ -238,8 +269,8 @@ class TestIrCron(TransactionCase, CronMixinCase):
CALL_TARGET = 5
def f(self):
state['call_count'] += 1
self.env['ir.cron']._notify_progress(
done=1,
self.env['ir.cron']._commit_progress(
processed=1,
remaining=CALL_TARGET - state['call_count']
)
self.env.cr.commit()
@ -250,7 +281,7 @@ class TestIrCron(TransactionCase, CronMixinCase):
state = {'call_count': 0}
def f(self):
state['call_count'] += 1
self.env['ir.cron']._notify_progress(done=1, remaining=0)
self.env['ir.cron']._commit_progress(1, remaining=0)
self.env.cr.commit()
raise ValueError
return f, state
@ -264,6 +295,7 @@ class TestIrCron(TransactionCase, CronMixinCase):
( eleven_success, almost_failed, True, 10, 10, 0, True),
( five_success, 0, False, 5, 5, 0, True),
( five_success, almost_failed, False, 5, 5, 0, True),
( end_time, 0, True, 2, 10, 0, True),
( failure, 0, False, 1, 0, 1, True),
( failure, almost_failed, False, 1, 0, 0, False),
(failure_partial, 0, False, 5, 5, 1, True),
@ -284,24 +316,21 @@ class TestIrCron(TransactionCase, CronMixinCase):
self.cron._trigger()
self.env.flush_all()
self.registry.enter_test_mode(self.cr)
cb, state = cb(self.cron)
try:
with self.enter_registry_test_mode():
cb, state = cb(self.cron)
with mute_logger('odoo.addons.base.models.ir_cron'),\
patch.object(self.registry['ir.actions.server'], 'run', cb):
patch.object(self.registry['ir.actions.server'], 'run', cb),\
self.registry.cursor() as cr:
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
cr,
{**self.cron.read(load=None)[0], **default_progress_values}
)
finally:
self.registry.leave_test_mode()
self.cron.invalidate_recordset()
capture.records.invalidate_recordset()
self.assertEqual(self.cron.id in [job['id'] for job in self.cron._get_all_ready_jobs(self.env.cr)], trigger)
self.assertEqual(state['call_count'], call_count)
self.assertEqual(Progress.search_count([('cron_id', '=', self.cron.id), ('done', '=', 1)]), done_count)
self.assertEqual(sum(Progress.search([('cron_id', '=', self.cron.id), ('done', '>=', 1)]).mapped('done')), done_count)
self.assertEqual(self.cron.failure_count, fail_count)
self.assertEqual(self.cron.active, active)
@ -309,33 +338,36 @@ class TestIrCron(TransactionCase, CronMixinCase):
Trigger = self.env['ir.cron.trigger']
Progress = self.env['ir.cron.progress']
default_progress_values = {'done': 0, 'remaining': 0, 'timed_out_counter': 0}
frozen_datetime = self.frozen_datetime
def make_run(cron):
state = {'call_count': 0}
CALL_TARGET = 11
CALL_TARGET = 31
mocked_run_state = {'call_count': 0, 'duration': 0}
def run(self):
state['call_count'] += 1
self.env['ir.cron']._notify_progress(done=1, remaining=CALL_TARGET - state['call_count'])
return run, state
def mocked_run(self):
frozen_datetime.tick(delta=timedelta(seconds=mocked_run_state['duration']))
mocked_run_state['call_count'] += 1
self.env['ir.cron']._commit_progress(
processed=1,
remaining=CALL_TARGET - mocked_run_state['call_count'],
)
self.cron._trigger()
self.env.flush_all()
self.registry.enter_test_mode(self.cr)
mocked_run, mocked_run_state = make_run(self.cron)
try:
with patch.object(self.registry['ir.actions.server'], 'run', mocked_run):
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
{**self.cron.read(load=None)[0], **default_progress_values}
)
finally:
self.registry.leave_test_mode()
with (
self.enter_registry_test_mode(),
patch.object(self.registry['ir.actions.server'], 'run', mocked_run),
self.registry.cursor() as cr,
):
# make each run 2 seconds, so that it is run 10 times, 20 seconds in total
mocked_run_state['duration'] = 2
self.registry['ir.cron']._process_job(
cr,
{**self.cron.read(load=None)[0], **default_progress_values}
)
self.assertEqual(
mocked_run_state['call_count'], 10,
'`run` should have been called ten times',
'`run` should have been called 10 times',
)
self.assertEqual(
Progress.search_count([('done', '=', 1), ('cron_id', '=', self.cron.id)]), 10,
@ -347,16 +379,41 @@ class TestIrCron(TransactionCase, CronMixinCase):
)
self.env.flush_all()
self.registry.enter_test_mode(self.cr)
try:
with patch.object(self.registry['ir.actions.server'], 'run', mocked_run):
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
{**self.cron.read(load=None)[0], **default_progress_values}
)
finally:
self.registry.leave_test_mode()
with (
self.enter_registry_test_mode(),
patch.object(self.registry['ir.actions.server'], 'run', mocked_run),
self.registry.cursor() as cr,
):
# make each run 0.5 seconds, so that it is run 20 times, 10 seconds in total
mocked_run_state['duration'] = 0.5
self.registry['ir.cron']._process_job(
cr,
{**self.cron.read(load=None)[0], **default_progress_values}
)
self.assertEqual(
mocked_run_state['call_count'], 30,
'`run` should have been called 10 times',
)
self.assertEqual(
Progress.search_count([('done', '=', 1), ('cron_id', '=', self.cron.id)]), 30,
'There should be 30 progress log for this cron',
)
self.assertEqual(
Trigger.search_count([('cron_id', '=', self.cron.id)]), 1,
"One trigger should have been kept",
)
self.env.flush_all()
with (
self.enter_registry_test_mode(),
patch.object(self.registry['ir.actions.server'], 'run', mocked_run),
self.registry.cursor() as cr,
):
self.registry['ir.cron']._process_job(
cr,
{**self.cron.read(load=None)[0], **default_progress_values}
)
ready_jobs = self.registry['ir.cron']._get_all_ready_jobs(self.cr)
self.assertNotIn(
@ -364,31 +421,29 @@ class TestIrCron(TransactionCase, CronMixinCase):
'The cron has finished executing'
)
self.assertEqual(
mocked_run_state['call_count'], 10 + 1,
mocked_run_state['call_count'], 31,
'`run` should have been called one additional time',
)
self.assertEqual(
Progress.search_count([('done', '=', 1), ('cron_id', '=', self.cron.id)]), 11,
Progress.search_count([('done', '=', 1), ('cron_id', '=', self.cron.id)]), 31,
'There should be 11 progress log for this cron',
)
def test_cron_failed_increase(self):
self.cron._trigger()
self.env.flush_all()
self.registry.enter_test_mode(self.cr)
default_progress = {'done': 0, 'remaining': 0, 'timed_out_counter': 0}
try:
with self.enter_registry_test_mode():
with (
patch.object(self.registry['ir.cron'], '_callback', side_effect=Exception),
patch.object(self.registry['ir.cron'], '_notify_admin') as notify,
mute_logger('odoo.addons.base.models.ir_cron'),
self.registry.cursor() as cr,
):
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
cr,
{**self.cron.read(load=None)[0], **default_progress}
)
finally:
self.registry.leave_test_mode()
self.env.invalidate_all()
self.assertEqual(self.cron.failure_count, 1, 'The cron should have failed once')
@ -399,19 +454,17 @@ class TestIrCron(TransactionCase, CronMixinCase):
self.cron._trigger()
self.env.flush_all()
self.registry.enter_test_mode(self.cr)
try:
with (
patch.object(self.registry['ir.cron'], '_callback', side_effect=Exception),
patch.object(self.registry['ir.cron'], '_notify_admin') as notify,
):
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
{**self.cron.read(load=None)[0], **default_progress}
)
finally:
self.registry.leave_test_mode()
with (
self.enter_registry_test_mode(),
patch.object(self.registry['ir.cron'], '_callback', side_effect=Exception),
patch.object(self.registry['ir.cron'], '_notify_admin') as notify,
mute_logger('odoo.addons.base.models.ir_cron'),
self.registry.cursor() as cr,
):
self.registry['ir.cron']._process_job(
cr,
{**self.cron.read(load=None)[0], **default_progress}
)
self.env.invalidate_all()
self.assertEqual(self.cron.failure_count, 5, 'The cron should have failed one more time but not reset (due to time)')
@ -423,19 +476,17 @@ class TestIrCron(TransactionCase, CronMixinCase):
self.cron._trigger()
self.env.flush_all()
self.registry.enter_test_mode(self.cr)
try:
with (
patch.object(self.registry['ir.cron'], '_callback', side_effect=Exception),
patch.object(self.registry['ir.cron'], '_notify_admin') as notify,
):
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
{**self.cron.read(load=None)[0], **default_progress}
)
finally:
self.registry.leave_test_mode()
with (
self.enter_registry_test_mode(),
patch.object(self.registry['ir.cron'], '_callback', side_effect=Exception),
patch.object(self.registry['ir.cron'], '_notify_admin') as notify,
mute_logger('odoo.addons.base.models.ir_cron'),
self.registry.cursor() as cr,
):
self.registry['ir.cron']._process_job(
cr,
{**self.cron.read(load=None)[0], **default_progress}
)
self.env.invalidate_all()
self.assertEqual(self.cron.failure_count, 0, 'The cron should have failed one more time and reset to 0')
@ -451,32 +502,22 @@ class TestIrCron(TransactionCase, CronMixinCase):
'timed_out_counter': 3,
}])
self.env.flush_all()
self.registry.enter_test_mode(self.cr)
try:
with mute_logger('odoo.addons.base.models.ir_cron'):
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
{**progress.read(fields=['done', 'remaining', 'timed_out_counter'], load=None)[0], 'progress_id': progress.id, **self.cron.read(load=None)[0]}
)
finally:
self.registry.leave_test_mode()
with self.enter_registry_test_mode(), mute_logger('odoo.addons.base.models.ir_cron'), self.registry.cursor() as cr:
self.registry['ir.cron']._process_job(
cr,
{**progress.read(fields=['done', 'remaining', 'timed_out_counter'], load=None)[0], 'progress_id': progress.id, **self.cron.read(load=None)[0]}
)
self.env.invalidate_all()
self.assertEqual(self.cron.failure_count, 1, 'The cron should have failed once')
self.assertEqual(self.cron.active, True, 'The cron should still be active')
self.cron._trigger()
self.registry.enter_test_mode(self.cr)
try:
with self.enter_registry_test_mode(), self.registry.cursor() as cr:
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
cr,
{**progress.read(fields=['done', 'remaining', 'timed_out_counter'], load=None)[0], 'progress_id': progress.id, **self.cron.read(load=None)[0]}
)
finally:
self.registry.leave_test_mode()
self.env.invalidate_all()
self.assertEqual(self.cron.failure_count, 0, 'The cron should have succeeded and reset the counter')
@ -490,32 +531,22 @@ class TestIrCron(TransactionCase, CronMixinCase):
'timed_out_counter': 3,
}])
self.env.flush_all()
self.registry.enter_test_mode(self.cr)
try:
with mute_logger('odoo.addons.base.models.ir_cron'):
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
{**progress.read(fields=['done', 'remaining', 'timed_out_counter'], load=None)[0], 'progress_id': progress.id, **self.cron.read(load=None)[0]}
)
finally:
self.registry.leave_test_mode()
with self.enter_registry_test_mode(), mute_logger('odoo.addons.base.models.ir_cron'), self.registry.cursor() as cr:
self.registry['ir.cron']._process_job(
cr,
{**progress.read(fields=['done', 'remaining', 'timed_out_counter'], load=None)[0], 'progress_id': progress.id, **self.cron.read(load=None)[0]}
)
self.env.invalidate_all()
self.assertEqual(self.cron.failure_count, 1, 'The cron should have failed once')
self.assertEqual(self.cron.active, True, 'The cron should still be active')
self.cron._trigger()
self.registry.enter_test_mode(self.cr)
try:
with self.enter_registry_test_mode(), self.registry.cursor() as cr:
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
cr,
{**progress.read(fields=['done', 'remaining', 'timed_out_counter'], load=None)[0], 'progress_id': progress.id, **self.cron.read(load=None)[0]}
)
finally:
self.registry.leave_test_mode()
self.env.invalidate_all()
self.assertEqual(self.cron.failure_count, 0, 'The cron should have succeeded and reset the counter')
@ -525,24 +556,115 @@ class TestIrCron(TransactionCase, CronMixinCase):
job = self.env['ir.cron']._acquire_one_job(self.cr, self.cron.id)
self.assertEqual(job, None, "No error should be thrown, job should just be none")
@contextlib.contextmanager
def patch_cron_process_jobs_loop(self):
""" Yield a simplified function for testing `_process_jobs_loop`. """
self.cron.active = True
self.cron.search([('id', 'not in', self.cron.ids)]).active = False # deactivate all other for the test
with (
self.enter_registry_test_mode(),
self.registry.cursor() as cr,
):
def process_jobs(**kw):
kw.setdefault('job_ids', self.cron.ids)
return IrCron._process_jobs_loop(cr, **kw)
yield process_jobs
def patch_run_job(self, return_value=CompletionStatus.FULLY_DONE):
return patch.object(self.registry['ir.cron'], '_run_job', return_value=return_value)
def test_cron_process_jobs_simple(self):
with self.patch_cron_process_jobs_loop() as process_jobs, self.patch_run_job() as run:
cron = self.cron.create(self._get_cron_data(self.env))
cron._trigger()
self.cron._trigger()
job_ids = cron.ids + self.cron.ids
process_jobs(job_ids=job_ids)
self.assertTrue(all(
any(job_id == call.args[0]['id'] for call in run.mock_calls)
for job_id in job_ids
), "all jobs called at least once")
def test_cron_process_jobs_status_partial(self):
with self.patch_cron_process_jobs_loop() as process_jobs, self.patch_run_job(CompletionStatus.PARTIALLY_DONE) as run:
self.cron._trigger()
process_jobs()
run.assert_called_once()
def test_cron_process_jobs_status_failed(self):
with self.patch_cron_process_jobs_loop() as process_jobs, self.patch_run_job(CompletionStatus.FAILED) as run:
self.cron._trigger()
process_jobs()
run.assert_called_once()
def test_cron_process_jobs_locked(self):
with (
self.patch_cron_process_jobs_loop() as process_jobs,
self.patch_run_job() as run,
# simulate that record is locked
patch.object(IrCron, '_acquire_one_job', return_value=None) as acquire,
patch.object(time, 'monotonic', side_effect=lambda: 42 + run.call_count),
):
self.cron._trigger()
process_jobs()
run.assert_not_called()
acquire.assert_called_once()
def test_cron_commit_progress(self):
with self.enter_registry_test_mode(), self.registry.cursor() as cr:
cron = self.cron.with_env(self.cron.env(cr=cr, context={'cron_id': self.cron.id}))
# check remaining time
cron, progress = cron._add_progress()
result = cron._commit_progress()
self.assertEqual(result, float('inf'))
result = cron.with_context(cron_end_time=time.monotonic() - 1)._commit_progress()
self.assertEqual(result, 0)
# check remaining count
cron, progress = cron._add_progress()
cron._commit_progress(remaining=5)
self.assertEqual(progress.done, 0)
self.assertEqual(progress.remaining, 5)
cron._commit_progress(processed=3, remaining=7)
self.assertEqual(progress.done, 3)
self.assertEqual(progress.remaining, 7)
# check processed count
cron, progress = cron._add_progress()
cron._commit_progress(remaining=5)
cron._commit_progress(2)
self.assertEqual(progress.done, 2)
self.assertEqual(progress.remaining, 3)
cron._commit_progress(2)
self.assertEqual(progress.done, 4)
self.assertEqual(progress.remaining, 1)
cron._commit_progress(2)
self.assertEqual(progress.done, 6)
self.assertEqual(progress.remaining, 0)
# check deactivate flag
cron, progress = cron._add_progress()
cron._commit_progress(1, deactivate=True)
self.assertEqual(progress.done, 1)
self.assertEqual(progress.deactivate, True)
cron._commit_progress(1)
self.assertEqual(progress.done, 2)
self.assertEqual(progress.deactivate, True)
def test_cron_deactivate(self):
default_progress_values = {'done': 0, 'remaining': 0, 'timed_out_counter': 0}
def mocked_run(self):
self.env['ir.cron']._notify_progress(done=1, remaining=0, deactivate=True)
self.env['ir.cron']._commit_progress(processed=1, remaining=0, deactivate=True)
self.cron._trigger()
self.env.flush_all()
self.registry.enter_test_mode(self.cr)
try:
with patch.object(self.registry['ir.actions.server'], 'run', mocked_run):
self.registry['ir.cron']._process_job(
self.registry.db_name,
self.registry.cursor(),
{**self.cron.read(load=None)[0], **default_progress_values}
)
finally:
self.registry.leave_test_mode()
with self.enter_registry_test_mode(), patch.object(self.registry['ir.actions.server'], 'run', mocked_run), self.registry.cursor() as cr:
self.registry['ir.cron']._process_job(
cr,
{**self.cron.read(load=None)[0], **default_progress_values}
)
self.env.invalidate_all()
self.assertFalse(self.cron.active)

View file

@ -71,15 +71,15 @@ class TestIrDefault(TransactionCase):
{})
# default with a condition
IrDefault.search([('field_id.model', '=', 'res.partner.title')]).unlink()
IrDefault.set('res.partner.title', 'shortcut', 'X')
IrDefault.set('res.partner.title', 'shortcut', 'Mr', condition='name=Mister')
self.assertEqual(IrDefault._get_model_defaults('res.partner.title'),
{'shortcut': 'X'})
self.assertEqual(IrDefault._get_model_defaults('res.partner.title', condition='name=Miss'),
IrDefault.search([('field_id.model', '=', 'res.partner')]).unlink()
IrDefault.set('res.partner', 'street', 'X')
IrDefault.set('res.partner', 'street', 'Mr', condition='name=Mister')
self.assertEqual(IrDefault._get_model_defaults('res.partner'),
{'street': 'X'})
self.assertEqual(IrDefault._get_model_defaults('res.partner', condition='name=Miss'),
{})
self.assertEqual(IrDefault._get_model_defaults('res.partner.title', condition='name=Mister'),
{'shortcut': 'Mr'})
self.assertEqual(IrDefault._get_model_defaults('res.partner', condition='name=Mister'),
{'street': 'Mr'})
def test_invalid(self):
""" check error cases with 'ir.default' """
@ -101,12 +101,12 @@ class TestIrDefault(TransactionCase):
IrDefault.search([('field_id.model', '=', 'res.partner')]).unlink()
# set a record as a default value
title = self.env['res.partner.title'].create({'name': 'President'})
IrDefault.set('res.partner', 'title', title.id)
self.assertEqual(IrDefault._get_model_defaults('res.partner'), {'title': title.id})
country_id = self.env['res.country'].create({'name': 'country', 'code': 'ZZ'})
IrDefault.set('res.partner', 'country_id', country_id.id)
self.assertEqual(IrDefault._get_model_defaults('res.partner'), {'country_id': country_id.id})
# delete the record, and check the presence of the default value
title.unlink()
country_id.unlink()
self.assertEqual(IrDefault._get_model_defaults('res.partner'), {})
def test_multi_company_defaults(self):

View file

@ -101,26 +101,42 @@ class TestEmbeddedActionsBase(TransactionCaseWithUserDemo):
not be returned in the read method")
def test_groups_on_embedded_action(self):
arbitrary_group = self.env['res.groups'].create({
# Create user groups with implied permissions
nested_arbitrary_group = self.env['res.groups'].create({
'name': 'arbitrary_group',
'implied_ids': [(6, 0, [self.ref('base.group_user')])],
})
embedded_action_custo = self.env['ir.embedded.actions'].create({
'name': 'EmbeddedActionCusto',
'parent_res_model': 'res.partner',
'parent_action_id': self.parent_action.id,
'action_id': self.action_2.id,
'groups_ids': [(6, 0, [arbitrary_group.id])]
arbitrary_group = self.env['res.groups'].create({
'name': 'arbitrary_group',
'implied_ids': [(6, 0, [nested_arbitrary_group.id])],
})
embedded_action1, embedded_action2 = self.env['ir.embedded.actions'].create([
{
'name': 'EmbeddedActionCusto',
'parent_res_model': 'res.partner',
'parent_action_id': self.parent_action.id,
'action_id': self.action_2.id,
'groups_ids': [(6, 0, [nested_arbitrary_group.id])],
},
{
'name': 'EmbeddedActionCusto2',
'parent_res_model': 'res.partner',
'parent_action_id': self.parent_action.id,
'action_id': self.action_2.id,
'groups_ids': [(6, 0, [arbitrary_group.id])],
}
])
res = self.get_embedded_actions_ids(self.parent_action)
self.assertEqual(len(res), 2, "There should be 2 embedded records linked to the parent action")
self.assertTrue(self.embedded_action_1.id in res and self.embedded_action_2.id in res, "The correct embedded actions\
should be in embedded_actions")
self.env.user.write({'groups_id': [(4, arbitrary_group.id)]})
self.env.user.write({'group_ids': [(4, arbitrary_group.id)]})
res = self.get_embedded_actions_ids(self.parent_action)
self.assertEqual(len(res), 3, "There should be 3 embedded records linked to the parent action")
self.assertTrue(self.embedded_action_1.id in res and self.embedded_action_2.id in res and embedded_action_custo.id in res, "The correct embedded actions\
should be in embedded_actions")
self.assertEqual(len(res), 4, "There should be 4 embedded records linked to the parent action")
self.assertTrue(
self.embedded_action_1.id in res and self.embedded_action_2.id in res and embedded_action1.id in res and embedded_action2.id in res,
"The correct embedded actions should be in embedded_actions",
)
def test_create_embedded_action_with_action_and_python_method(self):
embedded_action1, embedded_action2 = self.env['ir.embedded.actions'].create([

View file

@ -35,254 +35,70 @@ class TestGetFilters(FiltersCase):
def test_own_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='d', user_id=self.USER_ID, model_id='ir.filters'))
dict(name='a', user_ids=[self.USER_ID], model_id='ir.filters'),
dict(name='b', user_ids=[self.USER_ID], model_id='ir.filters'),
dict(name='c', user_ids=[self.USER_ID], model_id='ir.filters'),
dict(name='d', user_ids=[self.USER_ID], model_id='ir.filters'))
filters = self.env['ir.filters'].with_user(self.USER_ID).get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
dict(name='b', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
dict(name='c', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
dict(name='d', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
dict(name='a', is_default=False, user_ids=[self.USER_NG[0]], domain='[]', context='{}', sort='[]'),
dict(name='b', is_default=False, user_ids=[self.USER_NG[0]], domain='[]', context='{}', sort='[]'),
dict(name='c', is_default=False, user_ids=[self.USER_NG[0]], domain='[]', context='{}', sort='[]'),
dict(name='d', is_default=False, user_ids=[self.USER_NG[0]], domain='[]', context='{}', sort='[]'),
])
def test_global_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=False, model_id='ir.filters'),
dict(name='c', user_id=False, model_id='ir.filters'),
dict(name='d', user_id=False, model_id='ir.filters'),
dict(name='a', user_ids=[], model_id='ir.filters'),
dict(name='b', user_ids=[], model_id='ir.filters'),
dict(name='c', user_ids=[], model_id='ir.filters'),
dict(name='d', user_ids=[], model_id='ir.filters'),
)
filters = self.env['ir.filters'].with_user(self.USER_ID).get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
dict(name='b', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
dict(name='c', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
dict(name='d', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
dict(name='a', is_default=False, user_ids=[], domain='[]', context='{}', sort='[]'),
dict(name='b', is_default=False, user_ids=[], domain='[]', context='{}', sort='[]'),
dict(name='c', is_default=False, user_ids=[], domain='[]', context='{}', sort='[]'),
dict(name='d', is_default=False, user_ids=[], domain='[]', context='{}', sort='[]'),
])
def test_no_third_party_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=ADMIN_USER_ID, model_id='ir.filters'),
dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='d', user_id=ADMIN_USER_ID, model_id='ir.filters') )
dict(name='a', user_ids=[], model_id='ir.filters'),
dict(name='b', user_ids=[ADMIN_USER_ID], model_id='ir.filters'),
dict(name='c', user_ids=[self.USER_ID], model_id='ir.filters'),
dict(name='d', user_ids=[ADMIN_USER_ID], model_id='ir.filters'))
filters = self.env['ir.filters'].with_user(self.USER_ID).get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', is_default=False, user_id=False, domain='[]', context='{}', sort='[]'),
dict(name='c', is_default=False, user_id=self.USER_NG, domain='[]', context='{}', sort='[]'),
])
class TestOwnDefaults(FiltersCase):
def test_new_no_filter(self):
"""
When creating a @is_default filter with no existing filter, that new
filter gets the default flag
"""
Filters = self.env['ir.filters'].with_user(self.USER_ID)
Filters.create_or_replace({
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=self.USER_NG, is_default=True,
domain='[]', context='{}', sort='[]'),
])
def test_new_filter_not_default(self):
"""
When creating a @is_default filter with existing non-default filters,
the new filter gets the flag
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].with_user(self.USER_ID)
Filters.create_or_replace({
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='c', user_id=self.USER_NG, is_default=True, domain='[]', context='{}', sort='[]'),
])
def test_new_filter_existing_default(self):
"""
When creating a @is_default filter where an existing filter is already
@is_default, the flag should be *moved* from the old to the new filter
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].with_user(self.USER_ID)
Filters.create_or_replace({
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='c', user_id=self.USER_NG, is_default=True, domain='[]', context='{}', sort='[]'),
])
def test_update_filter_set_default(self):
"""
When updating an existing filter to @is_default, if an other filter
already has the flag the flag should be moved
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].with_user(self.USER_ID)
Filters.create_or_replace({
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=self.USER_NG, is_default=True, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=self.USER_NG, is_default=False, domain='[]', context='{}', sort='[]'),
])
class TestGlobalDefaults(FiltersCase):
def test_new_filter_not_default(self):
"""
When creating a @is_default filter with existing non-default filters,
the new filter gets the flag
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=False, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].with_user(self.USER_ID)
Filters.create_or_replace({
'name': 'c',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=False, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=False, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='c', user_id=False, is_default=True, domain='[]', context='{}', sort='[]'),
])
def test_new_filter_existing_default(self):
"""
When creating a @is_default filter where an existing filter is already
@is_default, an error should be generated
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].with_user(self.USER_ID)
with self.assertRaises(exceptions.UserError):
Filters.create_or_replace({
'name': 'c',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
def test_update_filter_set_default(self):
"""
When updating an existing filter to @is_default, if an other filter
already has the flag an error should be generated
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].with_user(self.USER_ID)
with self.assertRaises(exceptions.UserError):
Filters.create_or_replace({
'name': 'a',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
def test_update_default_filter(self):
"""
Replacing the current default global filter should not generate any error
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.env['ir.filters'].with_user(self.USER_ID)
context_value = "{'some_key': True}"
Filters.create_or_replace({
'name': 'b',
'model_id': 'ir.filters',
'user_id': False,
'context': context_value,
'is_default': True,
})
filters = Filters.get_filters('ir.filters')
self.assertItemsEqual(noid(filters), [
dict(name='a', user_id=False, is_default=False, domain='[]', context='{}', sort='[]'),
dict(name='b', user_id=False, is_default=True, domain='[]', context=context_value, sort='[]'),
dict(name='a', is_default=False, user_ids=[], domain='[]', context='{}', sort='[]'),
dict(name='c', is_default=False, user_ids=[self.USER_NG[0]], domain='[]', context='{}', sort='[]'),
])
@tagged('post_install', '-at_install', 'migration')
class TestAllFilters(TransactionCase):
def check_filter(self, name, model, domain, fields, groupby, order, context):
def check_filter(self, name, model, domain, aggregates, groupby, order, context):
if groupby:
try:
self.env[model].with_context(context).read_group(domain, fields, groupby, orderby=order)
Model = self.env[model].with_context(context)
groupby = [groupby] if isinstance(groupby, str) else groupby
groupby = [
f"{group_spec}:month" if (
":" not in group_spec and
group_spec in Model._fields and
Model._fields[group_spec].type in ('date, datetime')
) else group_spec
for group_spec in groupby
]
Model.formatted_read_group(domain, groupby, aggregates, order=order)
except ValueError as e:
raise self.failureException("Test filter '%s' failed: %s" % (name, e)) from None
except KeyError as e:
@ -304,7 +120,7 @@ class TestAllFilters(TransactionCase):
name=filter_.name,
model=filter_.model_id,
domain=filter_._get_eval_domain(),
fields=[field.split(':')[0] for field in (groupby or [])],
aggregates=['__count'],
groupby=groupby,
order=','.join(ast.literal_eval(filter_.sort)),
context=context,
@ -340,18 +156,18 @@ class TestEmbeddedFilters(FiltersCase):
def test_global_filters_with_embedded_action(self):
Filters = self.env['ir.filters'].with_user(self.USER_ID)
Filters.create_or_replace({
Filters.create_filter({
'name': 'a',
'model_id': 'ir.filters',
'user_id': False,
'user_ids': [],
'is_default': True,
'embedded_action_id': self.embedded_action_1.id,
'embedded_parent_res_id': 1
})
Filters.create_or_replace({
Filters.create_filter({
'name': 'b',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'user_ids': [self.USER_ID],
'is_default': False,
'embedded_action_id': self.embedded_action_2.id,
'embedded_parent_res_id': 1
@ -359,7 +175,7 @@ class TestEmbeddedFilters(FiltersCase):
# If embedded_action_id and embedded_parent_res_id are set, should return the corresponding filter
filters = self.env['ir.filters'].with_user(self.USER_ID).get_filters('ir.filters', embedded_action_id=self.embedded_action_1.id, embedded_parent_res_id=1)
self.assertItemsEqual(noid(filters), [dict(name='a', is_default=True, user_id=False, domain='[]', context='{}', sort='[]')])
self.assertItemsEqual(noid(filters), [dict(name='a', is_default=True, user_ids=[], domain='[]', context='{}', sort='[]')])
# Check that the filter is correctly linked to one embedded_parent_res_id and is not returned if another one is set
filters = self.env['ir.filters'].with_user(self.USER_ID).get_filters('ir.filters', embedded_action_id=self.embedded_action_1.id, embedded_parent_res_id=2)
@ -367,7 +183,7 @@ class TestEmbeddedFilters(FiltersCase):
# Check that a shared filter can be fetched with another user
filters = self.env['ir.filters'].with_user(ADMIN_USER_ID).get_filters('ir.filters', embedded_action_id=self.embedded_action_1.id, embedded_parent_res_id=1)
self.assertItemsEqual(noid(filters), [dict(name='a', is_default=True, user_id=False, domain='[]', context='{}', sort='[]')])
self.assertItemsEqual(noid(filters), [dict(name='a', is_default=True, user_ids=[], domain='[]', context='{}', sort='[]')])
# If embedded_action_id and embedded_parent_res_id are not set, should return no filters
filters = self.env['ir.filters'].with_user(self.USER_ID).get_filters('ir.filters')
@ -375,18 +191,18 @@ class TestEmbeddedFilters(FiltersCase):
def test_global_filters_with_no_embedded_action(self):
Filters = self.env['ir.filters'].with_user(self.USER_ID)
filter_a = Filters.create_or_replace({
filter_a = Filters.create_filter({
'name': 'a',
'model_id': 'ir.filters',
'user_id': False,
'user_ids': [],
'is_default': True,
'embedded_action_id': False,
'embedded_parent_res_id': 0,
})
filter_b = Filters.create_or_replace({
filter_b = Filters.create_filter({
'name': 'b',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'user_ids': [self.USER_ID],
'is_default': True,
'embedded_action_id': False,
'embedded_parent_res_id': 1,

View file

@ -9,7 +9,7 @@ from unittest.mock import patch
import psycopg2.errors
from odoo import tools
from odoo.addons.base.tests import test_mail_examples
from odoo.addons.base.tests import mail_examples
from odoo.addons.base.tests.common import MockSmtplibCase
from odoo.tests import tagged, users
from odoo.tests.common import TransactionCase
@ -40,7 +40,7 @@ class EmailConfigCase(TransactionCase):
@patch.dict(config.options, {"email_from": "settings@example.com"})
def test_default_email_from(self):
""" Email from setting is respected and comes from configuration. """
message = self.env["ir.mail_server"].build_email(
message = self.env["ir.mail_server"]._build_email__(
False, "recipient@example.com", "Subject",
"The body of an email",
)
@ -108,8 +108,8 @@ class TestIrMailServer(TransactionCase, MockSmtplibCase):
'content',
'<p>content</p>',
'<head><meta content="text/html; charset=utf-8" http-equiv="Content-Type"></head><body><p>content</p></body>',
test_mail_examples.MISC_HTML_SOURCE,
test_mail_examples.QUOTE_THUNDERBIRD_HTML,
mail_examples.MISC_HTML_SOURCE,
mail_examples.QUOTE_THUNDERBIRD_HTML,
]
expected_list = [
'content',
@ -119,7 +119,7 @@ class TestIrMailServer(TransactionCase, MockSmtplibCase):
'On 01/05/2016 10:24 AM, Raoul\nPoilvache wrote:\n\n* Test reply. The suite. *\n\n--\nRaoul Poilvache\n\nTop cool !!!\n\n--\nRaoul Poilvache',
]
for body, expected in zip(bodies, expected_list):
message = self.env['ir.mail_server'].build_email(
message = self.env['ir.mail_server']._build_email__(
'john.doe@from.example.com',
'destinataire@to.example.com',
body=body,
@ -151,6 +151,7 @@ class TestIrMailServer(TransactionCase, MockSmtplibCase):
def test_mail_server_get_test_email_from(self):
""" Test the email used to test the mail server connection. Check
from_filter parsing / default fallback value. """
self.env.user.email = 'mitchell.admin@example.com'
test_server = self.env['ir.mail_server'].create({
'from_filter': 'example_2.com, example_3.com',
'name': 'Test Server',
@ -258,7 +259,7 @@ class TestIrMailServer(TransactionCase, MockSmtplibCase):
with self.subTest(mail_from=mail_from, provide_smtp=provide_smtp):
with self.mock_smtplib_connection():
if provide_smtp:
smtp_session = IrMailServer.connect(smtp_from=mail_from)
smtp_session = IrMailServer._connect__(smtp_from=mail_from)
message = self._build_email(mail_from=mail_from)
IrMailServer.send_email(message, smtp_session=smtp_session)
else:
@ -283,7 +284,7 @@ class TestIrMailServer(TransactionCase, MockSmtplibCase):
for provide_smtp in [False, True]:
with self.mock_smtplib_connection():
if provide_smtp:
smtp_session = IrMailServer.connect(smtp_from='"Name" <test@unknown_domain.com>')
smtp_session = IrMailServer._connect__(smtp_from='"Name" <test@unknown_domain.com>')
message = self._build_email(mail_from='"Name" <test@unknown_domain.com>')
IrMailServer.send_email(message, smtp_session=smtp_session)
else:
@ -317,7 +318,7 @@ class TestIrMailServer(TransactionCase, MockSmtplibCase):
mail_server, smtp_from = IrMailServer._find_mail_server(email_from='"Name" <test@unknown_domain.com>')
self.assertEqual(mail_server, context_server)
self.assertEqual(smtp_from, "notification@context.example.com")
smtp_session = IrMailServer.connect(smtp_from=smtp_from)
smtp_session = IrMailServer._connect__(smtp_from=smtp_from)
message = self._build_email(mail_from='"Name" <test@unknown_domain.com>')
IrMailServer.send_email(message, smtp_session=smtp_session)
@ -401,7 +402,7 @@ class TestIrMailServer(TransactionCase, MockSmtplibCase):
with self.subTest(mail_from=mail_from, provide_smtp=provide_smtp):
with self.mock_smtplib_connection():
if provide_smtp:
smtp_session = IrMailServer.connect(smtp_from=mail_from)
smtp_session = IrMailServer._connect__(smtp_from=mail_from)
message = self._build_email(mail_from=mail_from)
IrMailServer.send_email(message, smtp_session=smtp_session)
else:
@ -482,7 +483,7 @@ class TestIrMailServer(TransactionCase, MockSmtplibCase):
attachments = [('test.eml', eml_content, 'message/rfc822')]
# Build the email with the .eml attachment
message = IrMailServer.build_email(
message = IrMailServer._build_email__(
email_from='john.doe@from.example.com',
email_to='destinataire@to.example.com',
subject='Subject with .eml attachment',

View file

@ -7,14 +7,16 @@ import ssl
import unittest
import warnings
from base64 import b64encode
from os import getenv
from pathlib import Path
from unittest.mock import patch
from socket import getaddrinfo # keep a reference on the non-patched function
from unittest.mock import patch
from odoo import modules
from odoo.exceptions import UserError
from odoo.tools import config, file_path, mute_logger
from .common import TransactionCaseWithUserDemo
from odoo.addons.base.models.ir_mail_server import IrMail_Server
try:
import aiosmtpd
@ -29,6 +31,11 @@ PASSWORD = 'secretpassword'
_openssl = shutil.which('openssl')
_logger = logging.getLogger(__name__)
if getenv('ODOO_RUNBOT') and not _openssl:
_logger.warning("detected runbot environment but openssl not found in PATH, TestIrMailServerSMTPD will be skipped")
if getenv('ODOO_RUNBOT') and not aiosmtpd:
_logger.warning("detected runbot environment but aiosmtpd not installed, TestIrMailServerSMTPD will be skipped")
def _find_free_local_address():
""" Get a triple (family, address, port) on which it possible to bind
@ -148,7 +155,7 @@ class TestIrMailServerSMTPD(TransactionCaseWithUserDemo):
# reactivate sending emails during this test suite, make sure
# NOT TO send emails using another ir.mail_server than the one
# created in setUp!
patcher = patch.object(modules.module, 'current_test', False)
patcher = patch.object(IrMail_Server, '_disable_send', return_value=False)
patcher.start()
self.addCleanup(patcher.stop)
@ -177,6 +184,7 @@ class TestIrMailServerSMTPD(TransactionCaseWithUserDemo):
:param auth_required: whether the server enforces password
authentication or not.
"""
encryption = encryption.removesuffix('_strict')
assert encryption in ('none', 'ssl', 'starttls')
assert encryption == 'none' or ssl_context
@ -249,7 +257,7 @@ class TestIrMailServerSMTPD(TransactionCaseWithUserDemo):
('certificate', "valid client", client_cert, client_key, None),
]
for encryption in ('starttls', 'ssl'):
for encryption in ('starttls', 'starttls_strict', 'ssl', 'ssl_strict'):
mail_server.smtp_encryption = encryption
with self.start_smtpd(encryption, ssl_context, auth_required=False):
for authentication, name, certificate, private_key, error_pattern in matrix:
@ -292,9 +300,7 @@ class TestIrMailServerSMTPD(TransactionCaseWithUserDemo):
matrix = [
# auth_required, password, error_pattern
(False, MISSING, None),
(True, MISSING,
r"The server refused the sender address \(noreply@localhost\) "
r"with error b'5\.7\.0 Authentication required'"),
(True, MISSING, r"The server refused the sender address \(noreply@localhost\) with error .*"),
(True, INVALID,
r"The server has closed the connection unexpectedly\. "
r"Check configuration served on this port number\.\n "
@ -302,7 +308,7 @@ class TestIrMailServerSMTPD(TransactionCaseWithUserDemo):
(True, PASSWORD, None),
]
for encryption in ('none', 'starttls', 'ssl'):
for encryption in ('none', 'starttls', 'starttls_strict', 'ssl', 'ssl_strict'):
mail_server.smtp_encryption = encryption
for auth_required, password, error_pattern in matrix:
mail_server.smtp_user = password and self.user_demo.email
@ -345,8 +351,7 @@ class TestIrMailServerSMTPD(TransactionCaseWithUserDemo):
r"Check configuration served on this port number\.\n "
r"Connection unexpectedly closed: timed out"),
('none', 'starttls',
r"The server refused the sender address \(noreply@localhost\) with error "
r"b'Must issue a STARTTLS command first'"),
r"The server refused the sender address \(noreply@localhost\) with error .*"),
('starttls', 'none',
r"An option is not supported by the server:\n "
r"STARTTLS extension not supported by server\."),
@ -373,6 +378,7 @@ class TestIrMailServerSMTPD(TransactionCaseWithUserDemo):
mail_server.test_smtp_connection()
self.assertRegex(capture.exception.args[0], error_pattern)
@mute_logger('mail.log')
def test_man_in_the_middle_matrix(self):
"""
Simulate that a pirate was successful at intercepting the live
@ -396,22 +402,33 @@ class TestIrMailServerSMTPD(TransactionCaseWithUserDemo):
host_good = 'localhost'
host_bad = 'notlocalhost'
# for now it doesn't raise any error for bad cert/host
matrix = [
# authentication, certificate, hostname, error_pattern
('login', cert_bad, host_good, None),
('login', cert_good, host_bad, None),
('certificate', cert_bad, host_good, None),
('certificate', cert_good, host_bad, None),
# strict?, authentication, certificate, hostname, error_pattern
(False, 'login', cert_bad, host_good, None),
(False, 'login', cert_good, host_bad, None),
(False, 'certificate', cert_bad, host_good, None),
(False, 'certificate', cert_good, host_bad, None),
(True, 'login', cert_bad, host_good,
r"^An SSL exception occurred\. Check connection security type\.\n "
r".*certificate verify failed"),
(True, 'login', cert_good, host_bad,
r"^An SSL exception occurred\. Check connection security type\.\n "
r".*Hostname mismatch, certificate is not valid for 'notlocalhost'"),
(True, 'certificate', cert_bad, host_good,
r"^An SSL exception occurred\. Check connection security type\.\n "
r".*certificate verify failed"),
(True, 'certificate', cert_good, host_bad,
r"^An SSL exception occurred\. Check connection security type\.\n "
r".*CertificateError: hostname 'notlocalhost' doesn't match 'localhost'"),
]
for encryption in ('starttls', 'ssl'):
for authentication, certificate, hostname, error_pattern in matrix:
for strict, authentication, certificate, hostname, error_pattern in matrix:
mail_server.smtp_host = hostname
mail_server.smtp_authentication = authentication
mail_server.smtp_encryption = encryption
mail_server.smtp_encryption = encryption + ('_strict' if strict else '')
with self.subTest(
encryption=encryption,
encryption=encryption + ('_strict' if strict else ''),
authentication=authentication,
cert_good=certificate == cert_good,
host_good=hostname == host_good,

View file

@ -224,210 +224,6 @@ class TestXMLID(TransactionCase):
])
assert_xmlid(xmlid, records[5], f'The xmlid {xmlid} should have been updated with record (not an update) {records[1]}')
class TestIrModel(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# The test mode is necessary in this case. After each test, we call
# registry.reset_changes(), which opens a new cursor to retrieve custom
# models and fields. A regular cursor would correspond to the state of
# the database before setUpClass(), which is not correct. Instead, a
# test cursor will correspond to the state of the database of cls.cr at
# that point, i.e., before the call to setUp().
cls.registry.enter_test_mode(cls.cr)
cls.addClassCleanup(cls.registry.leave_test_mode)
# model and records for banana stages
cls.env['ir.model'].create({
'name': 'Banana Ripeness',
'model': 'x_banana_ripeness',
'field_id': [
Command.create({'name': 'x_name', 'ttype': 'char', 'field_description': 'Name'}),
]
})
# stage values are pairs (id, display_name)
cls.ripeness_green = cls.env['x_banana_ripeness'].name_create('Green')
cls.ripeness_okay = cls.env['x_banana_ripeness'].name_create('Okay, I guess?')
cls.ripeness_gone = cls.env['x_banana_ripeness'].name_create('Walked away on its own')
# model and records for bananas
cls.bananas_model = cls.env['ir.model'].create({
'name': 'Bananas',
'model': 'x_bananas',
'field_id': [
Command.create({'name': 'x_name', 'ttype': 'char', 'field_description': 'Name'}),
Command.create({'name': 'x_length', 'ttype': 'float', 'field_description': 'Length'}),
Command.create({'name': 'x_color', 'ttype': 'integer', 'field_description': 'Color'}),
Command.create({'name': 'x_ripeness_id', 'ttype': 'many2one',
'field_description': 'Ripeness','relation': 'x_banana_ripeness',
'group_expand': True})
]
})
# add non-stored field that is not valid in order
cls.env['ir.model.fields'].create({
'name': 'x_is_yellow',
'field_description': 'Is the banana yellow?',
'ttype': 'boolean',
'model_id': cls.bananas_model.id,
'store': False,
'depends': 'x_color',
'compute': "for banana in self:\n banana['x_is_yellow'] = banana.x_color == 9"
})
# default stage is ripeness_green
cls.env['ir.default'].set('x_bananas', 'x_ripeness_id', cls.ripeness_green[0])
cls.env['x_bananas'].create([{
'x_name': 'Banana #1',
'x_length': 3.14159,
'x_color': 9,
}, {
'x_name': 'Banana #2',
'x_length': 0,
'x_color': 6,
}, {
'x_name': 'Banana #3',
'x_length': 10,
'x_color': 6,
}])
def setUp(self):
# this cleanup is necessary after each test, and must be done last
self.addCleanup(self.registry.reset_changes)
super().setUp()
def test_model_order_constraint(self):
"""Check that the order constraint is properly enforced."""
VALID_ORDERS = ['id', 'id desc', 'id asc, x_length', 'x_color, x_length, create_uid']
for order in VALID_ORDERS:
self.bananas_model.order = order
INVALID_ORDERS = ['', 'x_wat', 'id esc', 'create_uid,', 'id, x_is_yellow']
for order in INVALID_ORDERS:
with self.assertRaises(ValidationError), self.cr.savepoint():
self.bananas_model.order = order
# check that the constraint is checked at model creation
fields_value = [
Command.create({'name': 'x_name', 'ttype': 'char', 'field_description': 'Name'}),
Command.create({'name': 'x_length', 'ttype': 'float', 'field_description': 'Length'}),
Command.create({'name': 'x_color', 'ttype': 'integer', 'field_description': 'Color'}),
]
self.env['ir.model'].create({
'name': 'MegaBananas',
'model': 'x_mega_bananas',
'order': 'x_name asc, id desc', # valid order
'field_id': fields_value,
})
with self.assertRaises(ValidationError):
self.env['ir.model'].create({
'name': 'GigaBananas',
'model': 'x_giga_bananas',
'order': 'x_name asc, x_wat', # invalid order
'field_id': fields_value,
})
# ensure we can order by a stored field via inherits
user_model = self.env['ir.model'].search([('model', '=', 'res.users')])
user_model._check_order() # must not raise
def test_model_order_search(self):
"""Check that custom orders are applied when querying a model."""
ORDERS = {
'id asc': ['Banana #1', 'Banana #2', 'Banana #3'],
'id desc': ['Banana #3', 'Banana #2', 'Banana #1'],
'x_color asc, id asc': ['Banana #2', 'Banana #3', 'Banana #1'],
'x_color asc, id desc': ['Banana #3', 'Banana #2', 'Banana #1'],
'x_length asc, id': ['Banana #2', 'Banana #1', 'Banana #3'],
}
for order, names in ORDERS.items():
self.bananas_model.order = order
self.assertEqual(self.env['x_bananas']._order, order)
bananas = self.env['x_bananas'].search([])
self.assertEqual(bananas.mapped('x_name'), names, 'failed to order by %s' % order)
def test_group_expansion(self):
"""Check that the basic custom group expansion works."""
groups = self.env['x_bananas'].read_group(domain=[],
fields=['x_ripeness_id'],
groupby=['x_ripeness_id'])
expected = [{
'x_ripeness_id': self.ripeness_green,
'x_ripeness_id_count': 3,
'__domain': [('x_ripeness_id', '=', self.ripeness_green[0])],
}, {
'x_ripeness_id': self.ripeness_okay,
'x_ripeness_id_count': 0,
'__domain': [('x_ripeness_id', '=', self.ripeness_okay[0])],
}, {
'x_ripeness_id': self.ripeness_gone,
'x_ripeness_id_count': 0,
'__domain': [('x_ripeness_id', '=', self.ripeness_gone[0])],
}]
self.assertEqual(groups, expected, 'should include 2 empty ripeness stages')
def test_rec_name_deletion(self):
"""Check that deleting 'x_name' does not crash."""
record = self.env['x_bananas'].create({'x_name': "Ifan Ben-Mezd"})
self.assertEqual(record._rec_name, 'x_name')
self.assertEqual(self.registry.field_depends[type(record).display_name], ('x_name',))
self.assertEqual(record.display_name, "Ifan Ben-Mezd")
# unlinking x_name should fixup _rec_name and display_name
self.env['ir.model.fields']._get('x_bananas', 'x_name').unlink()
record = self.env['x_bananas'].browse(record.id)
self.assertEqual(record._rec_name, None)
self.assertEqual(self.registry.field_depends[type(record).display_name], ())
self.assertEqual(record.display_name, f"x_bananas,{record.id}")
def test_monetary_currency_field(self):
fields_value = [
Command.create({'name': 'x_monetary', 'ttype': 'monetary', 'field_description': 'Monetary', 'currency_field': 'test'}),
]
with self.assertRaises(ValidationError):
self.env['ir.model'].create({
'name': 'Paper Company Model',
'model': 'x_paper_model',
'field_id': fields_value,
})
fields_value = [
Command.create({'name': 'x_monetary', 'ttype': 'monetary', 'field_description': 'Monetary', 'currency_field': 'x_falsy_currency'}),
Command.create({'name': 'x_falsy_currency', 'ttype': 'one2many', 'field_description': 'Currency', 'relation': 'res.currency'}),
]
with self.assertRaises(ValidationError):
self.env['ir.model'].create({
'name': 'Paper Company Model',
'model': 'x_paper_model',
'field_id': fields_value,
})
fields_value = [
Command.create({'name': 'x_monetary', 'ttype': 'monetary', 'field_description': 'Monetary', 'currency_field': 'x_falsy_currency'}),
Command.create({'name': 'x_falsy_currency', 'ttype': 'many2one', 'field_description': 'Currency', 'relation': 'res.partner'}),
]
with self.assertRaises(ValidationError):
self.env['ir.model'].create({
'name': 'Paper Company Model',
'model': 'x_paper_model',
'field_id': fields_value,
})
fields_value = [
Command.create({'name': 'x_monetary', 'ttype': 'monetary', 'field_description': 'Monetary', 'currency_field': 'x_good_currency'}),
Command.create({'name': 'x_good_currency', 'ttype': 'many2one', 'field_description': 'Currency', 'relation': 'res.currency'}),
]
model = self.env['ir.model'].create({
'name': 'Paper Company Model',
'model': 'x_paper_model',
'field_id': fields_value,
})
monetary_field = model.field_id.search([['name', 'ilike', 'x_monetary']])
self.assertEqual(len(monetary_field), 1,
"Should have the monetary field in the created ir.model")
self.assertEqual(monetary_field.currency_field, "x_good_currency",
"The currency field in monetary should have x_good_currency as name")
@tagged('-at_install', 'post_install')
class TestIrModelEdition(TransactionCase):
@ -561,11 +357,3 @@ class TestIrModelInherit(TransactionCase):
self.assertEqual(len(imi), 1)
self.assertEqual(imi.parent_id.model, "res.partner")
self.assertEqual(imi.parent_field_id.name, "partner_id")
def test_delegate_field(self):
imi = self.env["ir.model.inherit"].search(
[("model_id.model", "=", "ir.cron"), ("parent_field_id", "!=", False)]
)
self.assertEqual(len(imi), 1)
self.assertEqual(imi.parent_id.model, "ir.actions.server")
self.assertEqual(imi.parent_field_id.name, "ir_actions_server_id")

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from contextlib import contextmanager
from datetime import datetime
import psycopg2
import psycopg2.errors
@ -192,24 +192,61 @@ class TestIrSequenceGenerate(BaseCase):
with self.assertRaises(UserError):
env['ir.sequence'].next_by_code('test_sequence_type_7')
def test_ir_sequence_interpolation_dict(self):
""" Test date-based interpolation directives in sequence suffix/prefix. """
with environment() as env:
seq = env['ir.sequence'].create({
'code': 'test_sequence_type_8',
'name': "Test sequence",
'prefix': '%(year)s/%(month)s/%(day)s/',
'suffix': '/%(y)s/%(doy)s/%(woy)s',
})
self.assertTrue(seq)
now = datetime.now()
self.assertEqual(
env['ir.sequence'].next_by_code('test_sequence_type_8'),
now.strftime('%Y/%m/%d/1/%y/%j/%W'),
)
def test_ir_sequence_iso_directives(self):
""" Test ISO 8061 date directives in sequence suffix/prefix. """
with environment() as env:
seq = env['ir.sequence'].create({
'code': 'test_sequence_type_9',
'name': "Test sequence",
'prefix': '%(isoyear)s/%(isoy)s/',
'suffix': '/%(isoweek)s/%(weekday)s',
})
self.assertTrue(seq)
isoyear, isoweek, weekday = datetime.now().isocalendar()
self.assertEqual(
env['ir.sequence'].next_by_code('test_sequence_type_9'),
f"{isoyear}/{isoyear % 100}/1/{isoweek}/{weekday % 7}",
)
def test_ir_sequence_suffix(self):
""" test whether a user error is raised for an invalid sequence """
# try to create a sequence with invalid suffix
with environment() as env:
env['ir.sequence'].create({
'code': 'test_sequence_type_8',
'code': 'test_sequence_type_10',
'name': 'Test sequence',
'prefix': '',
'suffix': '/%(invalid)s',
})
with self.assertRaisesRegex(UserError, "Invalid prefix or suffix"):
env['ir.sequence'].next_by_code('test_sequence_type_8')
env['ir.sequence'].next_by_code('test_sequence_type_10')
@classmethod
def setUpClass(cls):
with environment() as env:
cls._sequence_ids = env['ir.sequence'].search([]).ids
@classmethod
def tearDownClass(cls):
drop_sequence('test_sequence_type_5')
drop_sequence('test_sequence_type_6')
with environment() as env:
env['ir.sequence'].search([('id', 'not in', cls._sequence_ids)]).unlink()
class TestIrSequenceInit(common.TransactionCase):

View file

@ -13,14 +13,14 @@ from odoo.tools import misc
from odoo.tools.mail import (
is_html_empty, html2plaintext, html_to_inner_content, html_sanitize, append_content_to_html, plaintext2html,
email_domain_normalize, email_normalize, email_re,
email_split, email_split_and_format, email_split_tuples,
email_split, email_split_and_format, email_split_and_format_normalize, email_split_tuples,
single_email_re,
formataddr,
email_anonymize,
prepend_html_content,
)
from . import test_mail_examples
from . import mail_examples
@tagged('mail_sanitize')
@ -51,10 +51,16 @@ class TestSanitizer(BaseCase):
("lala<p>yop</p>xxx", "<p>lala</p><p>yop</p>xxx"), # trailing text
("Merci à l'intérêt pour notre produit.nous vous contacterons bientôt. Merci",
u"<p>Merci à l'intérêt pour notre produit.nous vous contacterons bientôt. Merci</p>"), # unicode
('<div>a<div>b</div></div>', '<div>a<div>b</div></div>'),
('<div><div>a</div></div>', '<div><div>a</div></div>'),
('<script> alert(1) </script>', ''),
('<head><title>Title of the document</title><head>', ''),
]
for content, expected in cases:
html = html_sanitize(content)
self.assertEqual(html, expected, 'html_sanitize is broken')
html = html_sanitize(html)
self.assertEqual(html, expected, 'html_sanitize is not idempotent')
def test_comment_malformed(self):
html = '''<!-- malformed-close --!> <img src='x' onerror='alert(1)'></img> --> comment <!-- normal comment --> --> out of context balise --!>'''
@ -115,7 +121,7 @@ class TestSanitizer(BaseCase):
self.assertEqual(html_sanitize(content, silent=False), '')
def test_html(self):
sanitized_html = html_sanitize(test_mail_examples.MISC_HTML_SOURCE)
sanitized_html = html_sanitize(mail_examples.MISC_HTML_SOURCE)
for tag in ['<div', '<b', '<i', '<u', '<strike', '<li', '<blockquote', '<a href']:
self.assertIn(tag, sanitized_html, 'html_sanitize stripped too much of original html')
for attr in ['javascript']:
@ -204,21 +210,21 @@ class TestSanitizer(BaseCase):
self.assertEqual(new_html, u'<span>Coin coin </span>')
def test_style_class(self):
html = html_sanitize(test_mail_examples.REMOVE_CLASS, sanitize_attributes=True, sanitize_style=True, strip_classes=True)
for ext in test_mail_examples.REMOVE_CLASS_IN:
html = html_sanitize(mail_examples.REMOVE_CLASS, sanitize_attributes=True, sanitize_style=True, strip_classes=True)
for ext in mail_examples.REMOVE_CLASS_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.REMOVE_CLASS_OUT:
for ext in mail_examples.REMOVE_CLASS_OUT:
self.assertNotIn(ext, html,)
def test_style_class_only(self):
html = html_sanitize(test_mail_examples.REMOVE_CLASS, sanitize_attributes=False, sanitize_style=True, strip_classes=True)
for ext in test_mail_examples.REMOVE_CLASS_IN:
html = html_sanitize(mail_examples.REMOVE_CLASS, sanitize_attributes=False, sanitize_style=True, strip_classes=True)
for ext in mail_examples.REMOVE_CLASS_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.REMOVE_CLASS_OUT:
for ext in mail_examples.REMOVE_CLASS_OUT:
self.assertNotIn(ext, html,)
def test_edi_source(self):
html = html_sanitize(test_mail_examples.EDI_LIKE_HTML_SOURCE)
html = html_sanitize(mail_examples.EDI_LIKE_HTML_SOURCE)
self.assertIn(
'font-family: \'Lucida Grande\', Ubuntu, Arial, Verdana, sans-serif;', html,
'html_sanitize removed valid styling')
@ -228,51 +234,51 @@ class TestSanitizer(BaseCase):
self.assertNotIn('</body></html>', html, 'html_sanitize did not remove extra closing tags')
def test_quote_blockquote(self):
html = html_sanitize(test_mail_examples.QUOTE_BLOCKQUOTE)
for ext in test_mail_examples.QUOTE_BLOCKQUOTE_IN:
html = html_sanitize(mail_examples.QUOTE_BLOCKQUOTE)
for ext in mail_examples.QUOTE_BLOCKQUOTE_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.QUOTE_BLOCKQUOTE_OUT:
for ext in mail_examples.QUOTE_BLOCKQUOTE_OUT:
self.assertIn(u'<span data-o-mail-quote="1">%s' % misc.html_escape(ext), html)
def test_quote_thunderbird(self):
html = html_sanitize(test_mail_examples.QUOTE_THUNDERBIRD_1)
for ext in test_mail_examples.QUOTE_THUNDERBIRD_1_IN:
html = html_sanitize(mail_examples.QUOTE_THUNDERBIRD_1)
for ext in mail_examples.QUOTE_THUNDERBIRD_1_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.QUOTE_THUNDERBIRD_1_OUT:
for ext in mail_examples.QUOTE_THUNDERBIRD_1_OUT:
self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
def test_quote_hotmail_html(self):
html = html_sanitize(test_mail_examples.QUOTE_HOTMAIL_HTML)
for ext in test_mail_examples.QUOTE_HOTMAIL_HTML_IN:
html = html_sanitize(mail_examples.QUOTE_HOTMAIL_HTML)
for ext in mail_examples.QUOTE_HOTMAIL_HTML_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.QUOTE_HOTMAIL_HTML_OUT:
for ext in mail_examples.QUOTE_HOTMAIL_HTML_OUT:
self.assertIn(ext, html)
html = html_sanitize(test_mail_examples.HOTMAIL_1)
for ext in test_mail_examples.HOTMAIL_1_IN:
html = html_sanitize(mail_examples.HOTMAIL_1)
for ext in mail_examples.HOTMAIL_1_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.HOTMAIL_1_OUT:
for ext in mail_examples.HOTMAIL_1_OUT:
self.assertIn(ext, html)
def test_quote_outlook_html(self):
html = html_sanitize(test_mail_examples.QUOTE_OUTLOOK_HTML)
for ext in test_mail_examples.QUOTE_OUTLOOK_HTML_IN:
html = html_sanitize(mail_examples.QUOTE_OUTLOOK_HTML)
for ext in mail_examples.QUOTE_OUTLOOK_HTML_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.QUOTE_OUTLOOK_HTML_OUT:
for ext in mail_examples.QUOTE_OUTLOOK_HTML_OUT:
self.assertIn(ext, html)
def test_quote_thunderbird_html(self):
html = html_sanitize(test_mail_examples.QUOTE_THUNDERBIRD_HTML)
for ext in test_mail_examples.QUOTE_THUNDERBIRD_HTML_IN:
html = html_sanitize(mail_examples.QUOTE_THUNDERBIRD_HTML)
for ext in mail_examples.QUOTE_THUNDERBIRD_HTML_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.QUOTE_THUNDERBIRD_HTML_OUT:
for ext in mail_examples.QUOTE_THUNDERBIRD_HTML_OUT:
self.assertIn(ext, html)
def test_quote_yahoo_html(self):
html = html_sanitize(test_mail_examples.QUOTE_YAHOO_HTML)
for ext in test_mail_examples.QUOTE_YAHOO_HTML_IN:
html = html_sanitize(mail_examples.QUOTE_YAHOO_HTML)
for ext in mail_examples.QUOTE_YAHOO_HTML_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.QUOTE_YAHOO_HTML_OUT:
for ext in mail_examples.QUOTE_YAHOO_HTML_OUT:
self.assertIn(ext, html)
def test_quote_basic_text(self):
@ -337,30 +343,30 @@ class TestSanitizer(BaseCase):
self.assertEqual(sanitized_twice, expected_result)
def test_quote_gmail(self):
html = html_sanitize(test_mail_examples.GMAIL_1)
for ext in test_mail_examples.GMAIL_1_IN:
html = html_sanitize(mail_examples.GMAIL_1)
for ext in mail_examples.GMAIL_1_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.GMAIL_1_OUT:
for ext in mail_examples.GMAIL_1_OUT:
self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
def test_quote_text(self):
html = html_sanitize(test_mail_examples.TEXT_1)
for ext in test_mail_examples.TEXT_1_IN:
html = html_sanitize(mail_examples.TEXT_1)
for ext in mail_examples.TEXT_1_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.TEXT_1_OUT:
for ext in mail_examples.TEXT_1_OUT:
self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
html = html_sanitize(test_mail_examples.TEXT_2)
for ext in test_mail_examples.TEXT_2_IN:
html = html_sanitize(mail_examples.TEXT_2)
for ext in mail_examples.TEXT_2_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.TEXT_2_OUT:
for ext in mail_examples.TEXT_2_OUT:
self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
def test_quote_bugs(self):
html = html_sanitize(test_mail_examples.BUG1)
for ext in test_mail_examples.BUG_1_IN:
html = html_sanitize(mail_examples.BUG1)
for ext in mail_examples.BUG_1_IN:
self.assertIn(ext, html)
for ext in test_mail_examples.BUG_1_OUT:
for ext in mail_examples.BUG_1_OUT:
self.assertIn(u'<span data-o-mail-quote="1">%s</span>' % misc.html_escape(ext), html)
def test_misc(self):
@ -383,10 +389,10 @@ class TestSanitizer(BaseCase):
# ms office is currently not supported, have to find a way to support it
# def test_30_email_msoffice(self):
# new_html = html_sanitize(test_mail_examples.MSOFFICE_1, remove=True)
# for ext in test_mail_examples.MSOFFICE_1_IN:
# new_html = html_sanitize(mail_examples.MSOFFICE_1, remove=True)
# for ext in mail_examples.MSOFFICE_1_IN:
# self.assertIn(ext, new_html)
# for ext in test_mail_examples.MSOFFICE_1_OUT:
# for ext in mail_examples.MSOFFICE_1_OUT:
# self.assertNotIn(ext, new_html)
@ -726,7 +732,8 @@ class TestEmailTools(BaseCase):
def test_email_split_and_format(self):
""" Test 'email_split_and_format', notably in case of multi encapsulation
or multi emails. """
or multi emails. Also check 'email_split_and_format_normalize' while
being there. """
sources = [
'deboulonneur@example.com',
'"Super Déboulonneur" <deboulonneur@example.com>', # formatted
@ -745,6 +752,8 @@ class TestEmailTools(BaseCase):
'"Déboulonneur 😊" <deboulonneur@example.com>', # unicode in name
'"Déboulonneur 😊" <deboulonneur.😊@example.com>', # unicode in name and email left-part
'"Déboulonneur" <déboulonneur@examplé.com>', # utf-8
'"Déboulonneur" <DEboulonneur@😊.example.com>', # case + unicode
'"Déboulonneur" <DÉBoulonneur.😊@Éxamplé.com>', # case + utf-8 + unicode
]
expected_list = [
['deboulonneur@example.com'],
@ -764,13 +773,24 @@ class TestEmailTools(BaseCase):
['"Déboulonneur 😊" <deboulonneur@example.com>'],
['"Déboulonneur 😊" <deboulonneur.😊@example.com>'],
['"Déboulonneur" <déboulonneur@examplé.com>'],
['"Déboulonneur" <DEboulonneur@😊.example.com>'],
['"Déboulonneur" <DÉBoulonneur.😊@Éxamplé.com>'],
]
# mostly the same except 3 cases so don't copy paste everything
normalized = {
# lower
'deboulonneur@example.com Déboulonneur': ['deboulonneur@example.comdéboulonneur'],
'"Déboulonneur" <DEboulonneur@😊.example.com>': ['"Déboulonneur" <deboulonneur@😊.example.com>'],
# encoded -> not lowerized
'"Déboulonneur" <DÉBoulonneur.😊@Éxamplé.com>': ['"Déboulonneur" <DÉBoulonneur.😊@éxamplé.com>'],
}
for source, expected in zip(sources, expected_list):
with self.subTest(source=source):
self.assertEqual(email_split_and_format(source), expected)
self.assertEqual(email_split_and_format_normalize(source), normalized.get(source, expected))
def test_email_split_tuples(self):
""" Test 'email_split_and_format' that returns (name, email) pairs
""" Test 'email_split_tuples' that returns (name, email) pairs
found in text input """
expected = [
# single email

View file

@ -1,23 +1,15 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import datetime
from dateutil.relativedelta import relativedelta
import os.path
import pytz
from odoo.tools import (
config,
date_utils,
file_open,
file_path,
merge_sequences,
misc,
remove_accents,
)
from odoo.tests.common import BaseCase, TransactionCase
from odoo.tools import config, misc, urls
from odoo.tools.mail import validate_url
from odoo.tests.common import TransactionCase, BaseCase
from odoo.tools.misc import file_open, file_path, merge_sequences, remove_accents
class TestMergeSequences(BaseCase):
@ -48,149 +40,6 @@ class TestMergeSequences(BaseCase):
self.assertEqual(seq, ['A', 'B', 'X', 'Y', 'C', 'Z'])
class TestDateRangeFunction(BaseCase):
""" Test on date_range generator. """
def test_date_range_with_naive_datetimes(self):
""" Check date_range with naive datetimes. """
start = datetime.datetime(1985, 1, 1)
end = datetime.datetime(1986, 1, 1)
expected = [
datetime.datetime(1985, 1, 1, 0, 0),
datetime.datetime(1985, 2, 1, 0, 0),
datetime.datetime(1985, 3, 1, 0, 0),
datetime.datetime(1985, 4, 1, 0, 0),
datetime.datetime(1985, 5, 1, 0, 0),
datetime.datetime(1985, 6, 1, 0, 0),
datetime.datetime(1985, 7, 1, 0, 0),
datetime.datetime(1985, 8, 1, 0, 0),
datetime.datetime(1985, 9, 1, 0, 0),
datetime.datetime(1985, 10, 1, 0, 0),
datetime.datetime(1985, 11, 1, 0, 0),
datetime.datetime(1985, 12, 1, 0, 0),
datetime.datetime(1986, 1, 1, 0, 0)
]
dates = [date for date in date_utils.date_range(start, end)]
self.assertEqual(dates, expected)
def test_date_range_with_date(self):
""" Check date_range with naive datetimes. """
start = datetime.date(1985, 1, 1)
end = datetime.date(1986, 1, 1)
expected = [
datetime.date(1985, 1, 1),
datetime.date(1985, 2, 1),
datetime.date(1985, 3, 1),
datetime.date(1985, 4, 1),
datetime.date(1985, 5, 1),
datetime.date(1985, 6, 1),
datetime.date(1985, 7, 1),
datetime.date(1985, 8, 1),
datetime.date(1985, 9, 1),
datetime.date(1985, 10, 1),
datetime.date(1985, 11, 1),
datetime.date(1985, 12, 1),
datetime.date(1986, 1, 1),
]
self.assertEqual(list(date_utils.date_range(start, end)), expected)
def test_date_range_with_timezone_aware_datetimes_other_than_utc(self):
""" Check date_range with timezone-aware datetimes other than UTC."""
timezone = pytz.timezone('Europe/Brussels')
start = datetime.datetime(1985, 1, 1)
end = datetime.datetime(1986, 1, 1)
start = timezone.localize(start)
end = timezone.localize(end)
expected = [datetime.datetime(1985, 1, 1, 0, 0),
datetime.datetime(1985, 2, 1, 0, 0),
datetime.datetime(1985, 3, 1, 0, 0),
datetime.datetime(1985, 4, 1, 0, 0),
datetime.datetime(1985, 5, 1, 0, 0),
datetime.datetime(1985, 6, 1, 0, 0),
datetime.datetime(1985, 7, 1, 0, 0),
datetime.datetime(1985, 8, 1, 0, 0),
datetime.datetime(1985, 9, 1, 0, 0),
datetime.datetime(1985, 10, 1, 0, 0),
datetime.datetime(1985, 11, 1, 0, 0),
datetime.datetime(1985, 12, 1, 0, 0),
datetime.datetime(1986, 1, 1, 0, 0)]
expected = [timezone.localize(e) for e in expected]
dates = [date for date in date_utils.date_range(start, end)]
self.assertEqual(expected, dates)
def test_date_range_with_mismatching_zones(self):
""" Check date_range with mismatching zone should raise an exception."""
start_timezone = pytz.timezone('Europe/Brussels')
end_timezone = pytz.timezone('America/Recife')
start = datetime.datetime(1985, 1, 1)
end = datetime.datetime(1986, 1, 1)
start = start_timezone.localize(start)
end = end_timezone.localize(end)
with self.assertRaises(ValueError):
dates = [date for date in date_utils.date_range(start, end)]
def test_date_range_with_inconsistent_datetimes(self):
""" Check date_range with a timezone-aware datetime and a naive one."""
context_timezone = pytz.timezone('Europe/Brussels')
start = datetime.datetime(1985, 1, 1)
end = datetime.datetime(1986, 1, 1)
end = context_timezone.localize(end)
with self.assertRaises(ValueError):
dates = [date for date in date_utils.date_range(start, end)]
def test_date_range_with_hour(self):
""" Test date range with hour and naive datetime."""
start = datetime.datetime(2018, 3, 25)
end = datetime.datetime(2018, 3, 26)
step = relativedelta(hours=1)
expected = [
datetime.datetime(2018, 3, 25, 0, 0),
datetime.datetime(2018, 3, 25, 1, 0),
datetime.datetime(2018, 3, 25, 2, 0),
datetime.datetime(2018, 3, 25, 3, 0),
datetime.datetime(2018, 3, 25, 4, 0),
datetime.datetime(2018, 3, 25, 5, 0),
datetime.datetime(2018, 3, 25, 6, 0),
datetime.datetime(2018, 3, 25, 7, 0),
datetime.datetime(2018, 3, 25, 8, 0),
datetime.datetime(2018, 3, 25, 9, 0),
datetime.datetime(2018, 3, 25, 10, 0),
datetime.datetime(2018, 3, 25, 11, 0),
datetime.datetime(2018, 3, 25, 12, 0),
datetime.datetime(2018, 3, 25, 13, 0),
datetime.datetime(2018, 3, 25, 14, 0),
datetime.datetime(2018, 3, 25, 15, 0),
datetime.datetime(2018, 3, 25, 16, 0),
datetime.datetime(2018, 3, 25, 17, 0),
datetime.datetime(2018, 3, 25, 18, 0),
datetime.datetime(2018, 3, 25, 19, 0),
datetime.datetime(2018, 3, 25, 20, 0),
datetime.datetime(2018, 3, 25, 21, 0),
datetime.datetime(2018, 3, 25, 22, 0),
datetime.datetime(2018, 3, 25, 23, 0),
datetime.datetime(2018, 3, 26, 0, 0)
]
dates = [date for date in date_utils.date_range(start, end, step)]
self.assertEqual(dates, expected)
class TestFormatLangDate(TransactionCase):
def test_00_accepted_types(self):
self.env.user.tz = 'Europe/Brussels'
@ -384,17 +233,17 @@ class TestRemoveAccents(BaseCase):
class TestAddonsFileAccess(BaseCase):
def assertCannotAccess(self, path, ExceptionType=FileNotFoundError, filter_ext=None):
def assertCannotAccess(self, path, ExceptionType=OSError, filter_ext=None, check_exists=True):
with self.assertRaises(ExceptionType):
file_path(path, filter_ext=filter_ext)
file_path(path, filter_ext=filter_ext, check_exists=check_exists)
def assertCanRead(self, path, needle='', mode='r', filter_ext=None):
with file_open(path, mode, filter_ext) as f:
self.assertIn(needle, f.read())
def assertCannotRead(self, path, ExceptionType=FileNotFoundError, filter_ext=None):
def assertCannotRead(self, path, ExceptionType=OSError, filter_ext=None):
with self.assertRaises(ExceptionType):
file_open(path, filter_ext=filter_ext)
file_open(path, filter_ext=filter_ext).close()
def test_file_path(self):
# absolute path
@ -418,6 +267,10 @@ class TestAddonsFileAccess(BaseCase):
# files in root_path are allowed
self.assertTrue(file_path('tools/misc.py'))
# absolute or relative inexisting files are ok
self.assertTrue(file_path(config.root_path + '/__inexisting', check_exists=False))
self.assertTrue(file_path('base/__inexisting_file', check_exists=False))
# errors when outside addons_paths
self.assertCannotAccess('/doesnt/exist')
self.assertCannotAccess('/tmp')
@ -444,7 +297,7 @@ class TestAddonsFileAccess(BaseCase):
self.assertCanRead(__file__, test_needle.encode(), mode='rb', filter_ext=('.py',))
# directory target *is* an error
with self.assertRaises(FileNotFoundError):
with self.assertRaises(IsADirectoryError):
file_open(os.path.join(__file__, '..'))
# relative path
@ -461,6 +314,10 @@ class TestAddonsFileAccess(BaseCase):
# files in root_path are allowed
self.assertCanRead('tools/misc.py')
# absolute or relative inexisting files are ok
self.assertCannotRead(config.root_path + '/__inexisting')
self.assertCannotRead('base/__inexisting_file')
# errors when outside addons_paths
self.assertCannotRead('/doesnt/exist')
self.assertCannotRead('')
@ -505,15 +362,15 @@ class TestFormatLang(TransactionCase):
self.env["res.lang"].create({
"name": "formatLang Lang",
"code": "fLT",
"grouping": "[3,2,-1]",
"grouping": "[3,0]",
"decimal_point": "!",
"thousands_sep": "?",
})
self.env['res.lang']._activate_lang('fLT')
self.assertEqual(misc.formatLang(self.env['res.lang'].with_context(lang='fLT').env, 1000000000, grouping=True), '10000?00?000!00')
self.assertEqual(misc.formatLang(self.env['res.lang'].with_context(lang='fLT').env, 1000000000, grouping=False), '1000000000.00')
self.assertEqual(misc.formatLang(self.env['res.lang'].with_context(lang='fLT').env, 1000000000, grouping=True), '1?000?000?000!00')
self.assertEqual(misc.formatLang(self.env['res.lang'].with_context(lang='fLT').env, 1000000000, grouping=False), '1000000000!00')
def test_decimal_precision(self):
decimal_precision = self.env['decimal.precision'].create({
@ -575,6 +432,24 @@ class TestFormatLang(TransactionCase):
self.assertEqual(misc.formatLang(self.env, 1822050000, rounding_method='HALF-UP', rounding_unit='lakhs'), '18,221')
self.assertEqual(misc.formatLang(self.env, 1822049900, rounding_method='HALF-UP', rounding_unit='lakhs'), '18,220')
def test_format_decimal_point_without_grouping(self):
lang = self.env['res.lang'].browse(misc.get_lang(self.env).id)
self.assertEqual(lang.format(f'%.{1}f', 1200.50, grouping=True), '1,200.5')
self.assertEqual(lang.format(f'%.{1}f', 1200.50, grouping=False), '1200.5')
comma_lang = self.env['res.lang'].create({
'name': 'Comma (CM)',
'code': 'co_MA',
'iso_code': 'co_MA',
'thousands_sep': ' ',
'decimal_point': ',',
'grouping': '[3,0]',
'active': True,
})
self.assertEqual(comma_lang.format(f'%.{1}f', 1200.50, grouping=True), '1 200,5')
self.assertEqual(comma_lang.format(f'%.{1}f', 1200.50, grouping=False), '1200,5')
class TestUrlValidate(BaseCase):
def test_url_validate(self):
@ -600,6 +475,113 @@ class TestUrlValidate(BaseCase):
self.assertEqual(validate_url('#model=project.task&id=3603607'), 'http://#model=project.task&id=3603607')
class TestUrlJoin(BaseCase):
# simple path joins
def test_basic_relative_path(self):
self.assertEqual(urls.urljoin('http://example.com/', 'c'), 'http://example.com/c')
self.assertEqual(urls.urljoin('http://example.com/b/', 'c'), 'http://example.com/b/c')
def test_path_normalization(self):
self.assertEqual(urls.urljoin('http://example.com/b/', '/c'), 'http://example.com/b/c') # leading / normalized
self.assertEqual(urls.urljoin('http://example.com/b///', '///c'), 'http://example.com/b/c')
self.assertEqual(urls.urljoin('http://example.com/b/', 'c/'), 'http://example.com/b/c/') # trailing / must be kept
def test_base_has_no_path(self):
self.assertEqual(urls.urljoin('http://example.com', 'c.com'), 'http://example.com/c.com')
self.assertEqual(urls.urljoin('http://example.com', '/c'), 'http://example.com/c')
def test_extra_trailing_slash(self):
self.assertEqual(urls.urljoin('http://example.com/b', ''), 'http://example.com/b')
self.assertEqual(urls.urljoin('http://example.com/b', ' '), 'http://example.com/b')
self.assertEqual(urls.urljoin('http://example.com/b', '/'), 'http://example.com/b/')
# Scheme/Netloc
def test_leading_and_trailing_slashes(self):
self.assertEqual(urls.urljoin('http://example.com/b//c/d/e/////f/g/', '/h/i/j/'), 'http://example.com/b/c/d/e/f/g/h/i/j/')
self.assertEqual(urls.urljoin('http://example.com/b//c/d/e/////f/g', '/h/i/j/'), 'http://example.com/b/c/d/e/f/g/h/i/j/')
self.assertEqual(urls.urljoin('http://example.com/b//c/d/e/////f/g', 'h/i/j/'), 'http://example.com/b/c/d/e/f/g/h/i/j/')
self.assertEqual(urls.urljoin('http://example.com/b//c/d/e/////f/g//', '/h/i/j'), 'http://example.com/b/c/d/e/f/g/h/i/j')
self.assertEqual(urls.urljoin('http://example.com//', '/b/c'), 'http://example.com/b/c')
self.assertEqual(urls.urljoin('/', '\\/example.com'), '/example.com')
self.assertEqual(urls.urljoin('/', '\\\x07/example.com'), '/example.com')
self.assertEqual(urls.urljoin('/', '\r\n\t\x00\\\r\n\t/example.com'), '/example.com')
def test_absolute_url_raises(self):
to_fail = [
('http://example.com/b#f1', 'http://example.com/c#f2'),
('http://test.example.com', 'https://test2.example.com'),
('https://test.example.com', 'http://test.example.com'),
('https://example.com/p?example=test', 'https://example.com/q?example=example'),
]
for base, extra in to_fail:
with self.subTest(base=base, extra=extra):
with self.assertRaises(ValueError):
urls.urljoin(base, extra)
def test_dot_segments_not_allowed(self):
urls_with_dot = [
('http://example.com/b/', 'c/./d'),
('http://example.com/b/', 'c/../d'),
('http://example.com/b/', 'c/d/%2E%2E/e'),
('http://example.com/b/', 'c/%2E/d'),
('http://example.com/b/', 'c%2F%2E./d'),
('http://example.com/b/', 'c%2F%2E%2Fd'),
('http://example.com/./b/', 'c/d'),
('http://example.com/b/../', 'c/d'),
('http://example.com/%2E/b/', 'c/d'),
('http://example.com/b%2F%2E%2E/d', 'c/d'),
]
for base, extra in urls_with_dot:
with self.subTest(base=base, extra=extra):
with self.assertRaises(ValueError):
urls.urljoin(base, extra)
# Query Handling
def test_query_keeps_base_by_default(self):
self.assertEqual(urls.urljoin('http://example.com/b?q1=1', 'c?q2=2'), 'http://example.com/b/c?q2=2')
self.assertEqual(urls.urljoin('http://example.com/b', 'c?q2=2'), 'http://example.com/b/c?q2=2')
self.assertEqual(urls.urljoin('http://example.com/b?q1=1', 'c'), 'http://example.com/b/c')
def test_allow_query_override(self):
self.assertEqual(urls.urljoin('http://example.com/b', 'c?q2=2'), 'http://example.com/b/c?q2=2')
self.assertEqual(urls.urljoin('http://example.com/b?q1=1', 'c'), 'http://example.com/b/c')
self.assertEqual(urls.urljoin('http://example.com/b?q1=1', 'c?q2=2'), 'http://example.com/b/c?q2=2')
self.assertEqual(urls.urljoin('http://example.com/b#c?q1=2&q2=3', 'c?q1=1&q2=2'), 'http://example.com/b/c?q1=1&q2=2')
# Fragment Handling
def test_only_extra_fragments(self):
self.assertEqual(urls.urljoin('http://example.com/b#f1', 'c#f2'), 'http://example.com/b/c#f2')
self.assertEqual(urls.urljoin('http://example.com/b', 'c#f2'), 'http://example.com/b/c#f2')
self.assertEqual(urls.urljoin('http://example.com/b#f1', 'c'), 'http://example.com/b/c')
# Input Validation
def test_not_string_fails(self):
with self.assertRaises(AssertionError):
urls.urljoin(None, 'c')
with self.assertRaises(AssertionError):
urls.urljoin('http://a', 123)
# Edge Cases
def test_whitespaces(self):
self.assertEqual(urls.urljoin('http://example.com/b', ' \ta '), 'http://example.com/b/a ')
self.assertEqual(urls.urljoin('http://example.com/b', '\t \x0a\x0b\n\r\t a\t \t'), 'http://example.com/b/a ')
self.assertEqual(urls.urljoin('http://example.com/b', ' a \n\t'), 'http://example.com/b/a ')
def test_empty_base_string(self):
self.assertEqual(urls.urljoin('', 'example.com'), '/example.com')
self.assertEqual(urls.urljoin('', '/c?q=1#f'), '/c?q=1#f')
def test_percent_encoding(self):
self.assertEqual(
urls.urljoin('http://host/space%20here/', 'x%2Fy'),
'http://host/space%20here/x%2Fy',
)
self.assertEqual(
urls.urljoin('http://host/a/', '%2Fhidden'),
'http://host/a/%2Fhidden',
)
class TestMiscToken(TransactionCase):
def test_expired_token(self):
@ -630,3 +612,79 @@ class TestMiscToken(TransactionCase):
new_timestamp = new_timestamp.to_bytes(8, byteorder='little')
token = base64.urlsafe_b64encode(token[:1] + new_timestamp + token[9:]).decode()
self.assertIsNone(misc.verify_hash_signed(self.env, 'test', token))
class TestFormatAmountFunction(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.currency_object_format_amount = cls.env["res.currency"].create({
"name": "format_amount Currency",
"symbol": "fA",
"rounding": 0.01, # Makes 12.345 as 12.34
"position": "before",
})
# A language where decimal separator and thousands separator is same to check effectiveness of
# regular expression used in format_amount
cls.kiliki_language = cls.env["res.lang"].create({
"name": "Kili kili",
"code": "GFL",
"grouping": "[3,0]",
"decimal_point": "#",
"thousands_sep": "#",
})
cls.kiliki_language.install_lang()
cls.kiliki_language.active = True
def assert_format_amount(self, amount, expected, trailing_zeroes=True, lang_code=None):
result = misc.format_amount(
self.env,
amount,
self.currency_object_format_amount,
trailing_zeroes=trailing_zeroes,
lang_code=lang_code,
)
self.assertEqual(result, expected)
def test_trailing_true_on_number_having_no_trailing_zeroes(self):
# Has no effect on number not having trailing zeroes
self.assert_format_amount(1.234, "fA%s1.23" % "\N{NO-BREAK SPACE}")
# Has no effect on number not having trailing zeroes - currency position after
self.currency_object_format_amount.position = "after"
self.assert_format_amount(1.234, "1.23%sfA" % "\N{NO-BREAK SPACE}")
def test_trailing_false_on_number_having_no_trailing_zeroes(self):
# Has no effect on number not having trailing zeroes even if trailing zeroes set as False
self.assert_format_amount(1.234, "fA%s1.23" % "\N{NO-BREAK SPACE}")
# Has no effect on number not having trailing zeroes - currency position after
self.currency_object_format_amount.position = "after"
self.assert_format_amount(1.234, "1.23%sfA" % "\N{NO-BREAK SPACE}")
def test_trailing_zeroes_true_on_number_having_trailing_zeroes(self):
# Has no effect on number having trailing zeroes if trailing zeroes set as True (True by default)
self.assert_format_amount(1.0000, "fA%s1.00" % "\N{NO-BREAK SPACE}")
# Has no effect on number having trailing zeroes - currency position after
self.currency_object_format_amount.position = "after"
self.assert_format_amount(1.0000, "1.00%sfA" % "\N{NO-BREAK SPACE}")
def test_trailing_false_on_number_having_trailing_zeroes(self):
# Has effect (removes trailing zeroes) on number having trailing zeroes if trailing zeroes set as False
self.assert_format_amount(1.0000, "fA%s1" % "\N{NO-BREAK SPACE}", False)
# Has effect on number having trailing zeroes - currency position after
self.currency_object_format_amount.position = "after"
self.assert_format_amount(1.0000, "1%sfA" % "\N{NO-BREAK SPACE}", False)
def test_trailing_false_on_number_having_trailing_zeroes_with_kilikili_language(self):
# Here the amount is first will be given decimal separator and thousandth separator as
# follows 10#000#00 in which second # is decimal so, the RE targets the decimal separator
# at the last position.
self.assert_format_amount(10000, "fA%s10#000" % "\N{NO-BREAK SPACE}", False, "GFL")
# Has no effect on number having same decimal and thousandth seperator - currency position after
self.currency_object_format_amount.position = "after"
self.assert_format_amount(10000, "10#000%sfA" % "\N{NO-BREAK SPACE}", False, "GFL")

View file

@ -6,8 +6,7 @@ from os.path import join as opj
from unittest.mock import patch
import odoo.addons
from odoo.modules.module import load_manifest
from odoo.modules.module import get_manifest
from odoo.modules.module import Manifest
from odoo.release import major_version
from odoo.tests.common import BaseCase
@ -15,7 +14,7 @@ from odoo.tests.common import BaseCase
class TestModuleManifest(BaseCase):
@classmethod
def setUpClass(cls):
cls._tmp_dir = tempfile.TemporaryDirectory(prefix='odoo-test-addons-')
cls._tmp_dir = tempfile.TemporaryDirectory(prefix='odoo_test_addons_')
cls.addClassCleanup(cls._tmp_dir.cleanup)
cls.addons_path = cls._tmp_dir.name
@ -23,32 +22,33 @@ class TestModuleManifest(BaseCase):
cls.startClassPatcher(patcher)
def setUp(self):
self.module_root = tempfile.mkdtemp(prefix='odoo-test-module-', dir=self.addons_path)
self.module_root = tempfile.mkdtemp(prefix='odoo_test_module_', dir=self.addons_path)
self.module_name = os.path.basename(self.module_root)
def test_default_manifest(self):
with open(opj(self.module_root, '__manifest__.py'), 'w') as file:
file.write(str({'name': f'Temp {self.module_name}', 'license': 'MIT'}))
file.write(str({'name': f'Temp {self.module_name}', 'license': 'MIT', 'author': 'Fapi'}))
with self.assertNoLogs('odoo.modules.module', 'WARNING'):
manifest = load_manifest(self.module_name)
manifest = dict(Manifest.for_addon(self.module_name))
self.maxDiff = None
self.assertDictEqual(manifest, {
'addons_path': self.addons_path,
'application': False,
'assets': {},
'author': 'Odoo S.A.',
'author': 'Fapi',
'auto_install': False,
'bootstrap': False,
'category': 'Uncategorized',
'cloc_exclude': [],
'configurator_snippets': {},
'configurator_snippets_addons': {},
'countries': [],
'data': [],
'demo': [],
'demo_xml': [],
'depends': [],
'depends': ['base'],
'description': '',
'external_dependencies': {},
'icon': '/base/static/description/icon.png',
@ -64,8 +64,10 @@ class TestModuleManifest(BaseCase):
'post_load': '',
'pre_init_hook': '',
'sequence': 100,
'static_path': None,
'summary': '',
'test': [],
'theme_customizations': {},
'update_xml': [],
'uninstall_hook': '',
'version': f'{major_version}.1.0',
@ -75,22 +77,25 @@ class TestModuleManifest(BaseCase):
def test_change_manifest(self):
module_name = 'base'
new_manifest = get_manifest(module_name)
new_manifest = Manifest.for_addon(module_name)
orig_auto_install = new_manifest['auto_install']
new_manifest['auto_install'] = not orig_auto_install
self.assertNotEqual(new_manifest, get_manifest(module_name))
self.assertEqual(orig_auto_install, get_manifest(module_name)['auto_install'])
with self.assertRaisesRegex(TypeError, r'does not support item assignment'):
new_manifest['auto_install'] = not orig_auto_install
self.assertIs(Manifest.for_addon(module_name), new_manifest)
def test_missing_manifest(self):
with self.assertLogs('odoo.modules.module', 'DEBUG') as capture:
manifest = load_manifest(self.module_name)
self.assertEqual(manifest, {})
self.assertIn("no manifest file found", capture.output[0])
manifest = Manifest.for_addon(self.module_name)
self.assertIs(manifest, None)
self.assertIn("manifest not found", capture.output[0])
def test_missing_license(self):
with open(opj(self.module_root, '__manifest__.py'), 'w') as file:
file.write(str({'name': f'Temp {self.module_name}'}))
with self.assertLogs('odoo.modules.module', 'WARNING') as capture:
manifest = load_manifest(self.module_name)
manifest = Manifest.for_addon(self.module_name)
manifest.manifest_cached
self.assertEqual(manifest['license'], 'LGPL-3')
self.assertIn("Missing `license` key", capture.output[0])
self.assertEqual(manifest['author'], '')
self.assertIn("Missing `author` key", capture.output[0])
self.assertIn("Missing `license` key", capture.output[1])

View file

@ -0,0 +1,134 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from odoo.tests.common import BaseCase
from odoo.modules.module_graph import ModuleGraph
from odoo.modules.module import _DEFAULT_MANIFEST, Manifest
from odoo.tools import mute_logger
class TestGraph(BaseCase):
@mute_logger('odoo.modules.module_graph')
def _test_graph_order(
self,
dependency: dict[str, list[str]],
modules_list: list[list[str]],
expected: list[str]
) -> None:
"""
Test the order of the modules that need to be loaded
:param dependency: A dictionary of module dependency: {module_a: [module_b, module_c]}
:param modules_list: [['module_a', 'module_b'], ['module_c'], ...]
module_a and module_b will be added in the first round
module_c will be added in the second round
...
:param expected: expected graph order
"""
def make_manifest(name, **kw):
if name not in dependency:
return None
return Manifest(
path='/dummy/' + name,
manifest_content=dict(_DEFAULT_MANIFEST, author='test', license='LGPL-3', depends=dependency.get(name, [])),
)
with (
patch('odoo.modules.module_graph.ModuleGraph._update_from_database'),
patch('odoo.modules.module_graph.Manifest.for_addon', make_manifest),
patch('odoo.modules.module_graph.ModuleGraph._imported_modules', {'studio_customization'}),
):
dummy_cr = None
graph = ModuleGraph(dummy_cr)
for modules in modules_list:
graph.extend(modules)
names = [p.name for p in graph]
self.assertListEqual(names, expected)
def test_graph_order_1(self):
dependency = {
'base': [],
'module1': ['base'],
'module2': ['module1'],
'module3': ['module1'],
'module4': ['module2', 'module3'],
'module5': ['module2', 'module4'],
}
# modules are in random order
self._test_graph_order(
dependency,
[['base'], ['module3', 'module4', 'module1', 'module5', 'module2']],
['base', 'module1', 'module2', 'module3', 'module4', 'module5']
)
# module 5's depends is missing
self._test_graph_order(
dependency,
[['base'], ['module1', 'module2', 'module3', 'module5']],
['base', 'module1', 'module2', 'module3']
)
# module 6's manifest is missing
self._test_graph_order(
dependency,
[['base'], ['module1', 'module2', 'module3', 'module4', 'module5', 'module6']],
['base', 'module1', 'module2', 'module3', 'module4', 'module5']
)
# three adding rounds
self._test_graph_order(
dependency,
[['base'], ['module1', 'module2', 'module3'], ['module4', 'module5']],
['base', 'module1', 'module2', 'module3', 'module4', 'module5']
)
def test_graph_order_2(self):
dependency = {
'base': [],
'module1': ['base'],
'module2': ['module1'],
'module3': ['module1'],
'module4': ['module3'],
'module5': ['module2'],
}
# module4 and module5 have the same depth but don't have shared depends
# they should be ordered by name
self._test_graph_order(
dependency,
[['base'], ['module3', 'module4', 'module1', 'module5', 'module2']],
['base', 'module1', 'module2', 'module3', 'module4', 'module5']
)
def test_graph_order_3(self):
dependency = {
'base': [],
'module1': ['base'],
'module2': ['module1'],
# depends loop
'module3': ['module1', 'module5'],
'module4': ['module2', 'module3'],
'module5': ['module2', 'module4'],
}
self._test_graph_order(
dependency,
[['base'], ['module3', 'module4', 'module1', 'module5', 'module2']],
['base', 'module1', 'module2']
)
def test_graph_order_with_test_modules(self):
dependency = {
'base': [],
'module1': ['base'],
'test_z': ['base'],
'test_a': ['test_z'],
'module2': ['module1'],
'module3': ['module1'],
'module4': ['module2', 'module3'],
'test_c': ['module1'],
'test_b': ['test_z', 'module4'],
}
self._test_graph_order(
dependency,
[['base'], ['test_c', 'module4', 'module2', 'test_a', 'module3', 'test_b', 'module1', 'test_z']],
['base', 'test_z', 'test_a', 'module1', 'test_c', 'module2', 'module3', 'module4', 'test_b']
)

View file

@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import AccessError
from odoo.exceptions import AccessError, LockError
from odoo.tests.common import TransactionCase, tagged
from odoo.tools import mute_logger
from odoo import Command
@ -25,7 +24,7 @@ class TestORM(TransactionCase):
user = self.env['res.users'].create({
'name': 'test user',
'login': 'test2',
'groups_id': [Command.set([self.ref('base.group_user')])],
'group_ids': [Command.set([self.ref('base.group_user')])],
})
cs = (c1 + c2).with_user(user)
self.assertEqual([{'id': c2.id, 'name': 'Y'}], cs.read(['name']), "read() should skip deleted records")
@ -38,7 +37,8 @@ class TestORM(TransactionCase):
def test_access_partial_deletion(self):
""" Check accessing a record from a recordset where another record has been deleted. """
Model = self.env['res.country']
self.assertTrue(type(Model).display_name.automatic, "test assumption not satisfied")
display_name_field = Model._fields['display_name']
self.assertTrue(display_name_field.compute and not display_name_field.store, "test assumption not satisfied")
# access regular field when another record from the same prefetch set has been deleted
records = Model.create([{'name': name[0], 'code': name[1]} for name in (['Foo', 'ZV'], ['Bar', 'ZX'], ['Baz', 'ZY'])])
@ -60,7 +60,7 @@ class TestORM(TransactionCase):
user = self.env['res.users'].create({
'name': 'test user',
'login': 'test2',
'groups_id': [Command.set([self.ref('base.group_user')])],
'group_ids': [Command.set([self.ref('base.group_user')])],
})
partner_model = self.env['ir.model'].search([('model','=','res.partner')])
@ -152,6 +152,59 @@ class TestORM(TransactionCase):
recs = partner.browse([0])
self.assertFalse(recs.exists())
def test_lock_for_update(self):
partner = self.env['res.partner']
p1, p2 = partner.search([], limit=2)
# lock p1
p1.lock_for_update(allow_referencing=True)
p1.lock_for_update(allow_referencing=False)
with self.env.registry.cursor() as cr:
recs = (p1 + p2).with_env(partner.env(cr=cr))
with self.assertRaises(LockError):
recs.lock_for_update()
sub_p2 = recs[1]
sub_p2.lock_for_update()
# parent transaction and read, but cannot lock the p2 records
p2.invalidate_model()
self.assertTrue(p2.name)
with self.assertRaises(LockError):
p2.lock_for_update()
# can still read from parent after locks and lock failures
p1.invalidate_model()
self.assertTrue(p1.name)
# can lock p2 now
p2.lock_for_update()
# cannot lock inexisting record
inexisting = partner.create({'name': 'inexisting'})
inexisting.unlink()
self.assertFalse(inexisting.exists())
with self.assertRaises(LockError):
inexisting.lock_for_update()
def test_try_lock_for_update(self):
partner = self.env['res.partner']
p1, p2, *_other = recs = partner.search([], limit=4)
# lock p1
self.assertEqual(p1.try_lock_for_update(allow_referencing=True), p1)
self.assertEqual(p1.try_lock_for_update(allow_referencing=False), p1)
with self.env.registry.cursor() as cr:
sub_recs = (p1 + p2).with_env(partner.env(cr=cr))
self.assertEqual(sub_recs.try_lock_for_update(), sub_recs[1])
self.assertEqual(recs.try_lock_for_update(limit=1), p1)
self.assertEqual(recs.try_lock_for_update(), recs)
# check that order is preserved when limiting
self.assertEqual(recs[::-1].try_lock_for_update(limit=1), recs[-1])
def test_write_duplicate(self):
p1 = self.env['res.partner'].create({'name': 'W'})
(p1 + p1).write({'name': 'X'})
@ -162,14 +215,14 @@ class TestORM(TransactionCase):
user = self.env['res.users'].create({
'name': 'test',
'login': 'test_m2m_store_trigger',
'groups_id': [Command.set([])],
'group_ids': [Command.set([])],
})
self.assertTrue(user.share)
group_user.write({'users': [Command.link(user.id)]})
group_user.write({'user_ids': [Command.link(user.id)]})
self.assertFalse(user.share)
group_user.write({'users': [Command.unlink(user.id)]})
group_user.write({'user_ids': [Command.unlink(user.id)]})
self.assertTrue(user.share)
def test_create_multi(self):

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase, tagged
@ -32,6 +31,8 @@ class TestOrmCache(TransactionCase):
cache, key, counter = get_cache_key_counter(IMD._xmlid_lookup, XMLID)
hit = counter.hit
miss = counter.miss
tx_hit = counter.tx_hit
tx_miss = counter.tx_miss
# clear the caches of ir.model.data, retrieve its key and
self.env.registry.clear_cache()
@ -41,18 +42,24 @@ class TestOrmCache(TransactionCase):
self.env.ref(XMLID)
self.assertEqual(counter.hit, hit)
self.assertEqual(counter.miss, miss + 1)
self.assertEqual(counter.tx_hit, tx_hit)
self.assertEqual(counter.tx_miss, tx_miss + 1)
self.assertIn(key, cache)
# lookup again
self.env.ref(XMLID)
self.assertEqual(counter.hit, hit + 1)
self.assertEqual(counter.miss, miss + 1)
self.assertEqual(counter.tx_hit, tx_hit)
self.assertEqual(counter.tx_miss, tx_miss + 1)
self.assertIn(key, cache)
# lookup again
self.env.ref(XMLID)
self.assertEqual(counter.hit, hit + 2)
self.assertEqual(counter.miss, miss + 1)
self.assertEqual(counter.tx_hit, tx_hit)
self.assertEqual(counter.tx_miss, tx_miss + 1)
self.assertIn(key, cache)
def test_invalidation(self):
@ -116,11 +123,11 @@ class TestOrmCache(TransactionCase):
)
def test_signaling_01_single(self):
self.assertFalse(self.registry.test_cr)
self.assertFalse(self._registry_patched)
self.registry.cache_invalidated.clear()
registry = self.registry
old_sequences = dict(registry.cache_sequences)
with self.assertLogs('odoo.modules.registry') as logs:
with self.assertLogs('odoo.registry') as logs:
registry.cache_invalidated.add('assets')
self.assertEqual(registry.cache_invalidated, {'assets'})
registry.signal_changes()
@ -128,7 +135,7 @@ class TestOrmCache(TransactionCase):
self.assertEqual(
logs.output,
["INFO:odoo.modules.registry:Caches invalidated, signaling through the database: ['assets']"],
["INFO:odoo.registry:Caches invalidated, signaling through the database: ['assets']"],
)
for key, value in old_sequences.items():
@ -148,15 +155,15 @@ class TestOrmCache(TransactionCase):
registry.check_signaling()
self.assertEqual(
logs.output,
["INFO:odoo.modules.registry:Invalidating caches after database signaling: ['assets', 'templates.cached_values']"],
["INFO:odoo.registry:Invalidating caches after database signaling: ['assets', 'templates.cached_values']"],
)
def test_signaling_01_multiple(self):
self.assertFalse(self.registry.test_cr)
self.assertFalse(self._registry_patched)
self.registry.cache_invalidated.clear()
registry = self.registry
old_sequences = dict(registry.cache_sequences)
with self.assertLogs('odoo.modules.registry') as logs:
with self.assertLogs('odoo.registry') as logs:
registry.cache_invalidated.add('assets')
registry.cache_invalidated.add('default')
self.assertEqual(registry.cache_invalidated, {'assets', 'default'})
@ -166,7 +173,7 @@ class TestOrmCache(TransactionCase):
self.assertEqual(
logs.output,
[
"INFO:odoo.modules.registry:Caches invalidated, signaling through the database: ['assets', 'default']",
"INFO:odoo.registry:Caches invalidated, signaling through the database: ['assets', 'default']",
],
)
@ -187,5 +194,44 @@ class TestOrmCache(TransactionCase):
registry.check_signaling()
self.assertEqual(
logs.output,
["INFO:odoo.modules.registry:Invalidating caches after database signaling: ['assets', 'default', 'templates.cached_values']"],
["INFO:odoo.registry:Invalidating caches after database signaling: ['assets', 'default', 'templates.cached_values']"],
)
def test_signaling_gc(self):
cr = self.env.cr
cr.execute('SELECT last_value FROM orm_signaling_registry_id_seq')
sequence_start = cr.fetchone()[0]
def assertSignalCount(expected_count, expected_max_id, message):
cr.execute("SELECT count(*), max(id) FROM orm_signaling_registry")
count, max_id = cr.fetchone()
self.assertEqual(expected_count, count, message)
self.assertEqual(expected_max_id, max_id-sequence_start, message)
cr.execute('DELETE FROM orm_signaling_registry')
for _ in range (7):
cr.execute("INSERT INTO orm_signaling_registry (date) VALUES (NOW() - interval '2 hours')")
cr.execute("INSERT INTO orm_signaling_registry DEFAULT VALUES")
assertSignalCount(8, 8, "8 signals were inserted")
self.env['ir.autovacuum']._gc_orm_signaling()
assertSignalCount(8, 8, "less than 10 signals, no deletion")
for _ in range (5):
cr.execute("INSERT INTO orm_signaling_registry DEFAULT VALUES")
assertSignalCount(13, 13, "5 more signals were inserted")
self.env['ir.autovacuum']._gc_orm_signaling()
assertSignalCount(10, 13, "more than 10 signals, some should have been deleted")
for _ in range (7):
cr.execute("INSERT INTO orm_signaling_registry DEFAULT VALUES")
assertSignalCount(17, 20, "7 more signals were inserted")
self.env['ir.autovacuum']._gc_orm_signaling()
assertSignalCount(13, 20, "Keeping the 13 signals having less than one hour")
# reset sequence to avoid side effects
cr.execute(f"SELECT setval('orm_signaling_registry_id_seq', {sequence_start})")

View file

@ -6,7 +6,7 @@ import time
from unittest.mock import patch
from odoo.exceptions import AccessError
from odoo.tests.common import BaseCase, TransactionCase, tagged, new_test_user
from odoo.tests.common import BaseCase, TransactionCase, tagged, new_test_user, HttpCase
from odoo.tools import profiler
from odoo.tools.profiler import Profiler, ExecutionContext
from odoo.tools.speedscope import Speedscope
@ -115,7 +115,6 @@ class TestSpeedscope(BaseCase):
def test_converts_profile_no_end(self):
profile = self.example_profile()
profile['result'].pop()
sp = Speedscope(init_stack_trace=profile['init_stack_trace'])
sp.add('profile', profile['result'])
sp.add_output(['profile'], complete=False)
@ -209,6 +208,49 @@ class TestSpeedscope(BaseCase):
(10.35, 'C', 'main'),
])
def test_following_queries_dont_merge(self):
sql_profile = self.example_profile()['result']
stack = sql_profile[1]['stack']
# make sql_profile two frames, separataed by some time
sql_profile = [
{
'start': 0.0,
'time': 1,
'query': 'SELECT 1',
'full_query': 'SELECT 1',
'stack': stack[:]
},
{
'start': 10.0,
'time': 1,
'query': 'SELECT 1',
'full_query': 'SELECT 1',
'stack': stack[:]
}
]
sp = Speedscope(init_stack_trace=[])
sp.add('sql', sql_profile)
sp.add_output(['sql'], complete=False, hide_gaps=True)
res = sp.make()
sql_output = res['profiles'][0]
events = [
(e['at'], e['type'], res['shared']['frames'][e['frame']]['name'])
for e in sql_output['events']
]
self.assertEqual(events, [
# pylint: disable=bad-continuation
(0.0, 'O', 'main'),
(0.0, 'O', 'do_stuff1'),
(0.0, 'O', 'execute'),
(0.0, 'O', "sql('SELECT 1')"),
(2.0, 'C', "sql('SELECT 1')"),
(2.0, 'C', 'execute'),
(2.0, 'C', 'do_stuff1'),
(2.0, 'C', 'main'),
])
def test_converts_context(self):
stack = [
['file.py', 10, 'level1', 'level1'],
@ -532,8 +574,7 @@ class TestProfiling(TransactionCase):
def test_profiler_return(self):
# Enter test mode to avoid the profiler to commit the result
self.registry.enter_test_mode(self.cr)
self.addCleanup(self.registry.leave_test_mode)
self.registry_enter_test_mode()
# Trick: patch db_connect() to make it return the registry with the current test cursor
# See `ProfilingHttpCase`
self.startClassPatcher(patch('odoo.sql_db.db_connect', return_value=self.registry))
@ -663,3 +704,10 @@ class TestSyncRecorder(BaseCase):
stacks_lines = [[frame[1] for frame in stack] for stack in stacks]
self.assertEqual(stacks_lines[1][0] + 1, stacks_lines[3][0],
"Call of b() in a() should be one line before call of c()")
@tagged('-standard', 'profiling_memory')
class TestMemoryProfiler(HttpCase):
def test_memory_profiler(self):
with Profiler(collectors=['memory'], db=None):
self.env['base.module.update'].create({}).update_module()

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import BaseCase, TransactionCase
@ -100,6 +99,25 @@ class QueryTestCase(BaseCase):
from_clause = query.from_clause.code
self.assertEqual(from_clause, '"foo" JOIN (SELECT id FROM foo) AS "foo__bar" ON ("foo"."bar_id" = "foo__bar"."id")')
def test_empty_set_result_ids(self):
query = Query(None, 'foo')
query.set_result_ids([])
self.assertEqual(query.get_result_ids(), ())
self.assertTrue(query.is_empty())
self.assertIn('SELECT', query.subselect().code, "subselect must contain SELECT")
query.add_where(SQL("x > 0"))
self.assertTrue(query.is_empty(), "adding where clauses keeps the result empty")
def test_set_result_ids(self):
query = Query(None, 'foo')
query.set_result_ids([1, 2, 3])
self.assertEqual(query.get_result_ids(), (1, 2, 3))
self.assertFalse(query.is_empty())
query.add_where(SQL("x > 0"))
self.assertIsNone(query._ids, "adding where clause resets the ids")
class TestQuery(TransactionCase):
def test_auto(self):
@ -115,17 +133,17 @@ class TestQuery(TransactionCase):
records = self.env['res.partner.category']
query = records._as_query()
self.assertEqual(list(query), records.ids)
self.cr.execute(*query.select())
self.cr.execute(query.select())
self.assertEqual([row[0] for row in self.cr.fetchall()], records.ids)
records = self.env['res.partner.category'].search([])
query = records._as_query()
self.assertEqual(list(query), records.ids)
self.cr.execute(*query.select())
self.cr.execute(query.select())
self.assertEqual([row[0] for row in self.cr.fetchall()], records.ids)
records = records.browse(reversed(records.ids))
query = records._as_query()
self.assertEqual(list(query), records.ids)
self.cr.execute(*query.select())
self.cr.execute(query.select())
self.assertEqual([row[0] for row in self.cr.fetchall()], records.ids)

File diff suppressed because it is too large Load diff

View file

@ -32,6 +32,11 @@ class TestReports(odoo.tests.TransactionCase):
'account.report_invoice': invoice_domain,
'l10n_th.report_commercial_invoice': invoice_domain,
}
extra_data_reports = {
"im_livechat.report_livechat_conversation": {
"company": self.env["res.company"].search([], limit=1)
},
}
Report = self.env['ir.actions.report']
for report in Report.search([('report_type', 'like', 'qweb')]):
report_model = 'report.%s' % report.report_name
@ -45,12 +50,13 @@ class TestReports(odoo.tests.TransactionCase):
if not report_records:
_logger.info("no record found skipping report %s", report.report_name)
data = extra_data_reports.get(report.report_name, {})
# Test report generation
if not report.multi:
for record in report_records:
Report._render_qweb_html(report.id, record.ids)
Report._render_qweb_html(report.id, record.ids, data)
else:
Report._render_qweb_html(report.id, report_records.ids)
Report._render_qweb_html(report.id, report_records.ids, data)
else:
continue

View file

@ -175,7 +175,7 @@ class TestResConfigExecute(TransactionCase):
settings_only_user = ResUsers.create({
'name': 'Sleepy Joe',
'login': 'sleepy',
'groups_id': [Command.link(group_system.id)],
'group_ids': [Command.link(group_system.id)],
})
# If not enabled (like in demo data), landing on res.config will try
@ -191,38 +191,6 @@ class TestResConfigExecute(TransactionCase):
for model in forbidden_models:
_logger.warning("Settings user doesn\'t have read access to the model %s", model)
settings_view_conditional_groups = self.env['ir.ui.view'].search([
('model', '=', 'res.config.settings'),
]).groups_id
# Semi hack to recover part of the coverage lost when the groups_id
# were moved from the views records to the view nodes (with groups attributes)
groups_data = self.env['res.groups'].get_groups_by_application()
for group_data in groups_data:
if group_data[1] == 'selection' and group_data[3] != (100, 'Other'):
manager_group = group_data[2][-1]
settings_view_conditional_groups += manager_group
settings_view_conditional_groups -= group_system # Already tested above
for group in settings_view_conditional_groups:
group_name = group.full_name
_logger.info("Testing settings access for group %s", group_name)
create_values = {
'name': f'Test {group_name}',
'login': group_name,
'groups_id': [Command.link(group_system.id), Command.link(group.id)]
}
user = ResUsers.create(create_values)
self._test_user_settings_view_save(user)
forbidden_models_fields = self._test_user_settings_fields_access(user)
for model, fields in forbidden_models_fields.items():
_logger.warning(
"Settings + %s user doesn\'t have read access to the model %s"
"linked to settings records by the field(s) %s",
group_name, model, ", ".join(str(field) for field in fields)
)
def _test_user_settings_fields_access(self, user):
"""Verify that settings user are able to create & save settings."""
settings = self.env['res.config.settings'].with_user(user).create({})

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from contextlib import contextmanager
@ -6,7 +5,7 @@ from unittest.mock import patch
from odoo import Command, models
from odoo.addons.base.models.ir_mail_server import extract_rfc2822_addresses
from odoo.addons.base.models.res_partner import Partner
from odoo.addons.base.models.res_partner import ResPartner
from odoo.addons.base.tests.common import TransactionCaseWithUserDemo
from odoo.exceptions import AccessError, RedirectWarning, UserError, ValidationError
from odoo.tests import Form
@ -29,7 +28,7 @@ class TestPartner(TransactionCaseWithUserDemo):
@contextmanager
def mockPartnerCalls(self):
_original_create = Partner.create
_original_create = ResPartner.create
self._new_partners = self.env['res.partner']
def _res_partner_create(model, *args, **kwargs):
@ -37,7 +36,7 @@ class TestPartner(TransactionCaseWithUserDemo):
self._new_partners += records.sudo()
return records
with patch.object(Partner, 'create',
with patch.object(ResPartner, 'create',
autospec=True, side_effect=_res_partner_create):
yield
@ -66,19 +65,19 @@ class TestPartner(TransactionCaseWithUserDemo):
})
# Cannot archive the partner
with self.assertRaises(RedirectWarning):
test_partner.with_user(self.env.ref('base.user_admin')).toggle_active()
test_partner.with_user(self.env.ref('base.user_admin')).action_archive()
with self.assertRaises(ValidationError):
test_partner.with_user(self.user_demo).toggle_active()
test_partner.with_user(self.user_demo).action_archive()
# Can archive the user but the partner stays active
test_user.toggle_active()
test_user.action_archive()
self.assertTrue(test_partner.active, 'Parter related to user should remain active')
# Now we can archive the partner
test_partner.toggle_active()
test_partner.action_archive()
# Activate the user should reactivate the partner
test_user.toggle_active()
test_user.action_unarchive()
self.assertTrue(test_partner.active, 'Activating user must active related partner')
def test_email_formatted(self):
@ -282,7 +281,7 @@ class TestPartner(TransactionCaseWithUserDemo):
self.assertEqual(partners[0][1], 'B Raoul chirurgiens-dentistes.fr', 'Incorrect partner returned, should be the first active')
def test_name_search_with_user(self):
""" Check name_search on partner, especially with domain based on auto_join
""" Check name_search on partner, especially with domain based on bypass_search_access
user_ids field. Check specific SQL of name_search correctly handle joined tables. """
test_partner = self.env['res.partner'].create({'name': 'Vlad the Impaler'})
test_user = self.env['res.users'].create({'name': 'Vlad the Impaler', 'login': 'vlad', 'email': 'vlad.the.impaler@example.com'})
@ -290,14 +289,14 @@ class TestPartner(TransactionCaseWithUserDemo):
ns_res = self.env['res.partner'].name_search('Vlad', operator='ilike')
self.assertEqual(set(i[0] for i in ns_res), set((test_partner | test_user.partner_id).ids))
ns_res = self.env['res.partner'].name_search('Vlad', args=[('user_ids.email', 'ilike', 'vlad')])
ns_res = self.env['res.partner'].name_search('Vlad', domain=[('user_ids.email', 'ilike', 'vlad')])
self.assertEqual(set(i[0] for i in ns_res), set(test_user.partner_id.ids))
# Check a partner may be searched when current user has no access but sudo is used
public_user = self.env.ref('base.public_user')
with self.assertRaises(AccessError):
test_partner.with_user(public_user).check_access('read')
ns_res = self.env['res.partner'].with_user(public_user).sudo().name_search('Vlad', args=[('user_ids.email', 'ilike', 'vlad')])
ns_res = self.env['res.partner'].with_user(public_user).sudo().name_search('Vlad', domain=[('user_ids.email', 'ilike', 'vlad')])
self.assertEqual(set(i[0] for i in ns_res), set(test_user.partner_id.ids))
def test_partner_merge_wizard_dst_partner_id(self):
@ -328,12 +327,51 @@ class TestPartner(TransactionCaseWithUserDemo):
'parent_id': parent_contact.id,
})
self.assertEqual(child_contact.with_context(lang='en_US').display_name, 'Parent, Other Address')
self.assertEqual(child_contact.with_context(lang='en_US').display_name, 'Parent, Other')
self.assertEqual(child_contact.with_context(lang='fr_FR').display_name, 'Parent, Autre adresse')
self.assertEqual(child_contact.with_context(lang='fr_FR').display_name, 'Parent, Autre')
def test_main_user_id(self):
"""Test main_user_id compute, including OdooBot special case and priority among several users."""
self.assertEqual(self.env.ref("base.partner_root").main_user_id, self.env.ref("base.user_root"))
partner = self.env["res.partner"].create({"name": "Test Partner"})
# archived users are ignored
self.env["res.users"].create(
{"active": False, "login": "archived_user", "partner_id": partner.id},
)
self.assertFalse(partner.main_user_id)
# portal users are taken as last resort
portal_user = self.env["res.users"].create(
{
"group_ids": [Command.set([self.ref("base.group_portal")])],
"login": "portal_user",
"partner_id": partner.id,
},
)
self.assertEqual(partner.main_user_id, portal_user)
# internal users are preferred over portal users
internal_user = self.env["res.users"].create(
{
"group_ids": [Command.set([self.ref("base.group_user")])],
"login": "internal_user",
"partner_id": partner.id,
},
)
self.assertEqual(partner.main_user_id, internal_user)
# smaller id is preferred when other conditions are the same to ensure determinism
self.env["res.users"].create(
{
"group_ids": [Command.set([self.ref("base.group_user")])],
"login": "internal_user_1d_2",
"partner_id": partner.id,
},
)
self.assertEqual(partner.main_user_id, internal_user)
# current user is always preferred
self.assertEqual(partner.with_user(portal_user).main_user_id, portal_user)
@tagged('res_partner')
@tagged('res_partner', 'res_partner_address')
class TestPartnerAddressCompany(TransactionCase):
@classmethod
@ -524,16 +562,14 @@ class TestPartnerAddressCompany(TransactionCase):
for child in inv, deli, other:
self.assertEqual(child.street, f'{child.name} Street', 'Should not be updated')
# UPSTREAM: child -> parent update: not done currently, consider contact is readonly
# UPSTREAM: child -> parent update: contact update company
# ------------------------------------------------------------
ct1.write(self.test_address_values_3)
for fname, fvalue in self.test_address_values_2_cmp.items():
self.assertEqual(self.test_parent[fname], fvalue)
self.assertEqual(ct2[fname], fvalue)
self.assertEqual(self.existing[fname], fvalue)
for fname, fvalue in self.test_address_values_3_cmp.items():
self.assertEqual(self.test_parent[fname], fvalue)
self.assertEqual(ct1[fname], fvalue)
self.assertEqual(ct1_1[fname], fvalue)
self.assertEqual(ct2[fname], fvalue)
@users('employee')
def test_address_first_contact_sync(self):
@ -682,6 +718,52 @@ class TestPartnerAddressCompany(TransactionCase):
self.assertEqual(leaf111.address_get([]),
{'contact': branch11.id}, 'Invalid address resolution, branch11 should now be contact')
@users('employee')
def test_address_parent_company_creation(self):
""" When creating parent company, it should be populated with information
coming from children when possible, and not erase child with void values
from parent. """
sync_commercial_fields = self.env['res.partner']._synced_commercial_fields()
# create your contact
individual = self.env['res.partner'].create({
'industry_id': self.test_industries[0].id,
'is_company': False,
'name': 'Individual',
'ref': 'REFINDIVIDUAL',
'vat': 'BEINDIVIDUAL',
**self.test_address_values,
})
self.assertFalse(individual.is_company)
self.assertEqual(individual.type, 'contact')
self.assertEqual(individual.ref, 'REFINDIVIDUAL')
self.assertEqual(individual.vat, 'BEINDIVIDUAL')
for fname, fvalue in self.test_address_values_cmp.items():
self.assertEqual(individual[fname], fvalue)
# create a company through "quick create", which would have partial default
# values for some company values
company = self.env['res.partner'].create({
'is_company': True,
'name': 'Company',
'ref': 'COMPANYREF',
})
# set it as parent of individual
with patch.object(
self.env['res.partner'].__class__, '_synced_commercial_fields',
lambda self: sync_commercial_fields + ['ref'],
):
individual.write({'parent_id': company})
self.assertFalse(company.industry_id, 'Industry is not considered for upstream')
self.assertEqual(company.ref, 'COMPANYREF', 'not updated from contact child')
self.assertEqual(company.vat, 'BEINDIVIDUAL')
for fname, fvalue in self.test_address_values_cmp.items():
self.assertEqual(company[fname], fvalue, 'Void parent should have been updated when adding a contact with address')
self.assertEqual(individual[fname], fvalue, 'Setting parent with void address should not reset child')
self.assertEqual(individual.industry_id, self.test_industries[0], 'No upstream sync, but no reset either')
self.assertEqual(individual.ref, 'COMPANYREF', 'downstream update')
self.assertEqual(individual.vat, 'BEINDIVIDUAL')
def test_commercial_partner_nullcompany(self):
""" The commercial partner is the first/nearest ancestor-or-self which
is a company or doesn't have a parent
@ -774,44 +856,120 @@ class TestPartnerAddressCompany(TransactionCase):
for fname, fvalue in (('company_registry', 'new'), ('industry_id', self.test_industries[1]), ('vat', 'BEnew')):
self.assertEqual(partner[fname], fvalue, "Commercial field should be updated from the company 2")
# UPSTREAM: not supported (but desyncs it)
# UPSTREAM: now supported
contactvat = 'BE445566'
contact.write({'vat': contactvat})
for partner in company_2 + contact_dlr + contact_ct + contact2:
self.assertEqual(partner.vat, 'BEnew', 'Sync to children should only work downstream and on commercial entities')
for partner in contact:
self.assertEqual(partner.vat, contactvat, 'Sync to children should only work downstream and on commercial entities')
for partner in company_2 + contact + contact_dlr + contact_ct + contact2:
self.assertEqual(partner.vat, contactvat, 'Commercial sync works upstream, therefore also for siblings')
# MISC PARENT MANIPULATION
# promote p1 to commercial entity
newcontactvat = 'BE998877'
contact.write({
'parent_id': company_1.id,
'is_company': True,
'name': 'Sunhelm Subsidiary',
'vat': newcontactvat,
})
self.assertEqual(contact.vat, contactvat, 'Setting is_company should stop auto-sync of commercial fields')
self.assertEqual(contact.vat, newcontactvat, 'Setting is_company should stop auto-sync of commercial fields')
self.assertEqual(contact.commercial_partner_id, contact, 'Incorrect commercial entity resolution after setting is_company')
self.assertEqual(contact2.vat, contactvat, 'Old sibling untouched')
self.assertEqual(company_1.vat, 'BE013456789', 'Should not impact parent')
self.assertEqual(contact_dlr.vat, 'BEnew', 'Promotion not propagated')
self.assertEqual(contact_ct.vat, 'BEnew', 'Promotion not propagated')
self.assertEqual(contact_dlr.vat, newcontactvat, 'Promotion propagated')
self.assertEqual(contact_ct.vat, newcontactvat, 'Promotion propagated')
# change parent of commercial entity
(contact_dlr + contact_ct).write({'vat': contactvat})
contact.write({'parent_id': company_2.id})
self.assertEqual(contact.vat, contactvat, 'Setting is_company should stop auto-sync of commercial fields')
self.assertEqual(contact.vat, newcontactvat, 'Setting is_company should stop auto-sync of commercial fields')
self.assertEqual(contact.commercial_partner_id, contact, 'Incorrect commercial entity resolution after setting is_company')
self.assertEqual(company_2.vat, 'BEnew', 'Should not impact parent')
self.assertEqual(contact_dlr.vat, contactvat, 'Parent company stop auto sync')
self.assertEqual(contact_ct.vat, contactvat, 'Parent company stop auto sync')
self.assertEqual(company_2.vat, contactvat, 'Should not impact parent')
self.assertEqual(contact_dlr.vat, newcontactvat, 'Parent company stop auto sync')
self.assertEqual(contact_ct.vat, newcontactvat, 'Parent company stop auto sync')
# writing on parent should not touch child commercial entities
sunhelmvat2 = 'BE0112233453'
company_2.write({'vat': sunhelmvat2})
for partner in contact + contact_ct + contact_dlr:
self.assertEqual(contact.vat, contactvat, 'Setting is_company should stop auto-sync of commercial fields')
self.assertEqual(contact.vat, newcontactvat, 'Setting is_company should stop auto-sync of commercial fields')
for partner in contact2:
self.assertEqual(partner.vat, sunhelmvat2, 'Commercial fields must be automatically synced')
def test_commercial_field_sync_reset(self):
""" Test voiding fields propagation. We would like to allow forcing void
values from parent, but limiting upstream reset from children. """
sync_commercial_fields = self.env['res.partner']._synced_commercial_fields()
# create your contact
individual = self.env['res.partner'].create({
'is_company': False,
'name': 'Individual',
'ref': 'REFINDIV',
'vat': 'BEINDIVIDUAL',
**self.test_address_values,
})
self.assertFalse(individual.is_company)
self.assertEqual(individual.type, 'contact')
self.assertEqual(individual.ref, 'REFINDIV')
self.assertEqual(individual.vat, 'BEINDIVIDUAL')
for fname, fvalue in self.test_address_values_cmp.items():
self.assertEqual(individual[fname], fvalue)
# create a company with values
company = self.env['res.partner'].create({
'industry_id': self.test_industries[1].id,
'is_company': True,
'name': 'Company',
'ref': 'REFCOMPANY',
'vat': 'BECOMPANY',
**self.test_address_values_2,
})
# set it as parent of individual
with patch.object(
self.env['res.partner'].__class__, '_synced_commercial_fields',
lambda self: sync_commercial_fields + ['ref'],
):
individual.write({'parent_id': company})
for fname, fvalue in self.test_address_values_2_cmp.items():
self.assertEqual(company[fname], fvalue, 'Parent address should have been kept')
self.assertEqual(company.industry_id, self.test_industries[1], 'Parent commercial field industry should have been kept')
self.assertEqual(company.ref, 'REFCOMPANY', 'Parent commercial field VAT should have been kept')
self.assertEqual(company.vat, 'BECOMPANY', 'Parent commercial field VAT should have been kept')
for fname, fvalue in self.test_address_values_2_cmp.items():
self.assertEqual(individual[fname], fvalue, 'Setting parent with an address should force contact address, even if set previously')
self.assertEqual(individual.industry_id, self.test_industries[1], 'Commercial fields should be synced from parent')
self.assertEqual(individual.ref, 'REFCOMPANY', 'Commercial fields should be synced from parent')
self.assertEqual(individual.vat, 'BECOMPANY', 'Commercial fields should be synced from parent')
# void from parent: DOWNSTREAM reset
with patch.object(
self.env['res.partner'].__class__, '_synced_commercial_fields',
lambda self: sync_commercial_fields + ['ref'],
):
company.write({
'industry_id': False,
'ref': False,
'vat': False,
})
self.assertFalse(individual.industry_id)
self.assertFalse(individual.ref)
self.assertFalse(individual.vat)
# reset values, and void from child: UPSTREAM RESET
company.write({
'industry_id': self.test_industries[1].id,
'vat': 'BECOMPANY'
})
self.assertEqual(individual.industry_id, self.test_industries[1])
self.assertEqual(individual.vat, 'BECOMPANY')
individual.write({
'industry_id': False,
'vat': False,
})
self.assertEqual(company.industry_id, self.test_industries[1], 'No upstream support of reset')
self.assertEqual(company.vat, 'BECOMPANY', 'No upstream support of reset')
self.assertFalse(individual.industry_id)
self.assertFalse(individual.vat)
def test_company_dependent_commercial_sync(self):
ResPartner = self.env['res.partner']
@ -842,6 +1000,22 @@ class TestPartnerAddressCompany(TransactionCase):
self.assertEqual(child_address.with_company(company_1).barcode, 'Company 1')
self.assertEqual(child_address.with_company(company_2).barcode, 'Company 2')
def test_company_dependent_commercial_sync_falsy_fields(self):
"""Check that company-dependent fields still sync when unset on current company."""
ResPartner = self.env['res.partner']
alt_company = self.env.company.create({'name': "Alt Company"})
parent = ResPartner.create({'name': "Parent", 'is_company': True, 'barcode': False})
parent.with_company(alt_company).barcode = "BARCODE"
with (
patch.object(ResPartner.__class__, '_commercial_fields', lambda self: ['barcode']),
patch.object(ResPartner.__class__, '_validate_fields'), # skip _check_barcode_unicity
):
child = ResPartner.create({'name': "Child", 'parent_id': parent.id})
self.assertFalse(child.barcode)
self.assertEqual(child.with_company(alt_company).barcode, "BARCODE")
def test_company_change_propagation(self):
""" Check propagation of company_id across children """
User = self.env['res.users']
@ -861,7 +1035,7 @@ class TestPartnerAddressCompany(TransactionCase):
test_partner_company.write({'company_id': False})
self.assertFalse(test_user.partner_id.company_id.id, "If the company_id is deleted from the partner company, it should be propagated to its children")
with self.assertRaises(UserError, msg="You should not be able to update the company_id of the partner company if the linked user of a child partner is not an allowed to be assigned to that company"), self.cr.savepoint():
with self.assertRaises(UserError, msg="You should not be able to update the company_id of the partner company if the linked user of a child partner is not an allowed to be assigned to that company"):
test_partner_company.write({'company_id': company_2.id})
def test_display_address_missing_key(self):
@ -894,10 +1068,13 @@ class TestPartnerAddressCompany(TransactionCase):
res_bhide = test_partner_bhide.with_context(show_address=1).display_name
self.assertEqual(res_bhide, "Atmaram Bhide", "name should contain only name if address is not available, without extra commas")
res_jetha = test_partner_jetha.with_context(show_address=1, address_inline=1).display_name
self.assertEqual(res_jetha, "Jethala, Powder gali, Gokuldham Society", "name should contain comma separated name and address")
res_bhide = test_partner_bhide.with_context(show_address=1, address_inline=1).display_name
self.assertEqual(res_bhide, "Atmaram Bhide", "name should contain only name if address is not available, without extra commas")
# Check that a child contact having no name shows the formatted display name as {parent_name} \t --{contact_type}--
test_partner_invoice = self.env['res.partner'].create({'parent_id': self.test_parent.id, 'type': 'invoice'})
self.assertEqual(
test_partner_invoice.with_context(formatted_display_name=True).display_name,
"GhostStep \t --Invoice--",
"Formatted display name should show parent name and type when child contact has no name",
)
def test_accessibility_of_company_partner_from_branch(self):
""" Check accessibility of company partner from branch. """

View file

@ -31,7 +31,6 @@ class TestResPartnerBank(SavepointCaseWithUserDemo):
# sanitaze the acc_number
sanitized_acc_number = 'BE001251882303'
self.assertEqual(partner_bank.sanitized_acc_number, sanitized_acc_number)
vals = partner_bank_model.search(
[('acc_number', '=', sanitized_acc_number)])
self.assertEqual(1, len(vals))
@ -50,7 +49,3 @@ class TestResPartnerBank(SavepointCaseWithUserDemo):
vals = partner_bank_model.search(
[('acc_number', '=', acc_number.lower())])
self.assertEqual(1, len(vals))
# updating the sanitized value will also update the acc_number
partner_bank.write({'sanitized_acc_number': 'BE001251882303WRONG'})
self.assertEqual(partner_bank.acc_number, partner_bank.sanitized_acc_number)

View file

@ -1,4 +1,6 @@
from odoo.tests.common import TransactionCase
from odoo.exceptions import AccessError
from odoo import Command
class TestMergePartner(TransactionCase):
@ -99,3 +101,49 @@ class TestMergePartner(TransactionCase):
self.assertTrue(self.partner1.exists(), "Destination partner should exist after merge")
self.assertEqual(self.attachment1.res_id, self.partner1.id, "Attachment should be linked to the destination partner")
self.assertEqual(self.attachment2.res_id, self.partner1.id, "Attachment should be reassigned to the destination partner")
def test_merge_partners_with_peon_user(self):
""" Test merging partners with a user having the bare minimum access rights"""
self.env["ir.model.access"].create({
'name': 'peon.access.merge.wizard',
'group_id': self.env.ref('base.group_user').id,
'model_id': self.env.ref('base.model_base_partner_merge_automatic_wizard').id,
'perm_read': 1,
'perm_write': 1,
'perm_create': 1,
})
self.env["ir.model.access"].create({
'name': 'peon.access.merge.wizard.line',
'group_id': self.env.ref('base.group_user').id,
'model_id': self.env.ref('base.model_base_partner_merge_line').id,
'perm_read': 1,
'perm_write': 1,
'perm_create': 1,
})
partner_peon = self.env['res.partner'].create({
'name': 'Peon',
'email': 'mark.peon@example.com',
})
user_peon = self.env['res.users'].create({
'login': 'peon',
'password': 'peon',
'partner_id': partner_peon.id,
'group_ids': [Command.set([self.env.ref('base.group_user').id])],
})
# internal user doesn't have the right to write on res.partner.bank
with self.assertRaises(AccessError):
self.bank1.with_user(user_peon).partner_id = self.partner2
wizard = self.env['base.partner.merge.automatic.wizard'].with_user(user_peon).create({})
src_partners = self.partner1 + self.partner3
wizard._merge((src_partners + self.partner2).ids, self.partner2, extra_checks=False)
self.assertFalse(src_partners.exists(), "Source partners should be deleted after merge")
self.assertTrue(self.partner2.exists(), "Destination partner should exist after merge")
self.assertRecordValues(self.partner2.bank_ids, [
{'acc_number': '12345'},
{'acc_number': '54321'},
])
self.assertEqual(self.attachment_bank1.res_id, self.bank1.id, "Bank attachment should remain linked to the correct bank account")
self.assertEqual(self.attachment_bank3.res_id, self.bank1.id, "Bank attachment should be reassigned to the correct bank account")

View file

@ -1,18 +1,52 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from types import SimpleNamespace
from unittest.mock import patch
from odoo import SUPERUSER_ID
from odoo.addons.base.models.res_users import is_selection_groups, get_selection_groups, name_selection_groups
from odoo.exceptions import UserError, ValidationError
from odoo.api import SUPERUSER_ID
from odoo.exceptions import AccessError, UserError, ValidationError
from odoo.fields import Command
from odoo.http import _request_stack
from odoo.tests import Form, TransactionCase, new_test_user, tagged, HttpCase, users
from odoo.tests import Form, TransactionCase, new_test_user, tagged, HttpCase, users, warmup
from odoo.tools import mute_logger
class TestUsers(TransactionCase):
class UsersCommonCase(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
users = cls.env['res.users'].create([
{
'name': 'Internal',
'login': 'user_internal',
'password': 'password',
'group_ids': [cls.env.ref('base.group_user').id],
'tz': 'UTC',
},
{
'name': 'Portal 1',
'login': 'portal_1',
'password': 'portal_1',
'group_ids': [cls.env.ref('base.group_portal').id],
},
{
'name': 'Portal 2',
'login': 'portal_2',
'password': 'portal_2',
'group_ids': [cls.env.ref('base.group_portal').id],
},
])
cls.user_internal, cls.user_portal_1, cls.user_portal_2 = users
# Remove from the cache the values filled with admin rights for the users/partners that have just been created
# So unit tests reading/writing these partners/users
# as other low-privileged users do not have their cache polluted with values fetched with admin rights
users.partner_id.invalidate_recordset()
users.invalidate_recordset()
class TestUsers(UsersCommonCase):
def test_name_search(self):
""" Check name_search on user. """
@ -126,15 +160,8 @@ class TestUsers(TransactionCase):
@mute_logger('odoo.sql_db')
def test_deactivate_portal_users_access(self):
"""Test that only a portal users can deactivate his account."""
user_internal = self.env['res.users'].create({
'name': 'Internal',
'login': 'user_internal',
'password': 'password',
'groups_id': [self.env.ref('base.group_user').id],
})
with self.assertRaises(UserError, msg='Internal users should not be able to deactivate their account'):
user_internal._deactivate_portal_user()
self.user_internal._deactivate_portal_user()
@mute_logger('odoo.sql_db', 'odoo.addons.base.models.res_users_deletion')
def test_deactivate_portal_users_archive_and_remove(self):
@ -149,7 +176,7 @@ class TestUsers(TransactionCase):
'name': 'Portal',
'login': 'portal_user',
'password': 'password',
'groups_id': [self.env.ref('base.group_portal').id],
'group_ids': [self.env.ref('base.group_portal').id],
})
portal_partner = portal_user.partner_id
@ -157,7 +184,7 @@ class TestUsers(TransactionCase):
'name': 'Portal',
'login': 'portal_user_2',
'password': 'password',
'groups_id': [self.env.ref('base.group_portal').id],
'group_ids': [self.env.ref('base.group_portal').id],
})
portal_partner_2 = portal_user_2.partner_id
@ -183,7 +210,8 @@ class TestUsers(TransactionCase):
'model_id': self.env.ref('base.model_res_partner').id,
})
self.env['res.users.deletion']._gc_portal_users()
with self.enter_registry_test_mode():
self.env.ref('base.ir_cron_res_users_deletion').method_direct_trigger()
self.assertFalse(portal_user.exists(), 'Should have removed the user')
self.assertFalse(portal_partner.exists(), 'Should have removed the partner')
@ -239,23 +267,35 @@ class TestUsers(TransactionCase):
self.assertEqual(user.context_get()['lang'], 'en_US')
@tagged('post_install', '-at_install')
class TestUsers2(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user_employee = cls.env['res.users'].create({
'name': 'employee',
'login': 'employee',
'groups_id': cls.env.ref('base.group_user'),
'tz': 'UTC',
})
def test_user_self_update(self):
""" Check that the user has access to write his phone. """
test_user = self.env['res.users'].create({'name': 'John Smith', 'login': 'jsmith'})
self.assertFalse(test_user.phone)
test_user.with_user(test_user).write({'phone': '2387478'})
self.assertEqual(
test_user.partner_id.phone,
'2387478',
"The phone of the partner_id shall be updated."
)
def test_session_non_existing_user(self):
"""
Test to check the invalidation of session bound to non existing (or deleted) users.
"""
User = self.env['res.users']
last_user_id = User.with_context(active_test=False).search([], limit=1, order="id desc")
non_existing_user = User.browse(last_user_id.id + 1)
self.assertFalse(non_existing_user._compute_session_token('session_id'))
@tagged('post_install', '-at_install', 'groups')
class TestUsers2(UsersCommonCase):
def test_change_user_login(self):
""" Check that partner email is updated when changing user's login """
User = self.env['res.users']
with Form(User, view='base.view_users_form') as UserForm:
with Form(User, view='base.view_users_simple_form') as UserForm:
UserForm.name = "Test User"
UserForm.login = "test-user1"
self.assertFalse(UserForm.email)
@ -266,325 +306,194 @@ class TestUsers2(TransactionCase):
"Setting a valid email as login should update the partner's email"
)
def test_reified_groups(self):
def test_default_groups(self):
""" The groups handler doesn't use the "real" view with pseudo-fields
during installation, so it always works (because it uses the normal
groups_id field).
group_ids field).
"""
default_group = self.env.ref('base.default_user_group')
test_group = self.env['res.groups'].create({'name': 'test_group'})
default_group.implied_ids = test_group
# use the specific views which has the pseudo-fields
f = Form(self.env['res.users'], view='base.view_users_form')
f.name = "bob"
f.login = "bob"
user = f.save()
self.assertIn(self.env.ref('base.group_user'), user.groups_id)
group_user = self.env.ref('base.group_user')
# all template user groups are copied
default_user = self.env.ref('base.default_user')
self.assertEqual(default_user.groups_id, user.groups_id)
self.assertIn(group_user, user.group_ids)
self.assertEqual(default_group.implied_ids + group_user, user.group_ids)
def test_selection_groups(self):
# create 3 groups that should be in a selection
app = self.env['ir.module.category'].create({'name': 'Foo'})
group1, group2, group0 = self.env['res.groups'].create([
{'name': name, 'category_id': app.id}
app = self.env['res.groups.privilege'].create({'name': 'Foo'})
group_user, group_manager, group_visitor = self.env['res.groups'].create([
{'name': name, 'privilege_id': app.id}
for name in ('User', 'Manager', 'Visitor')
])
# THIS PART IS NECESSARY TO REPRODUCE AN ISSUE: group1.id < group2.id < group0.id
self.assertLess(group1.id, group2.id)
self.assertLess(group2.id, group0.id)
self.assertLess(group_user.id, group_manager.id)
self.assertLess(group_manager.id, group_visitor.id)
# implication order is group0 < group1 < group2
group2.implied_ids = group1
group1.implied_ids = group0
groups = group0 + group1 + group2
# determine the name of the field corresponding to groups
fname = next(
name
for name in self.env['res.users'].fields_get()
if is_selection_groups(name) and group0.id in get_selection_groups(name)
)
self.assertCountEqual(get_selection_groups(fname), groups.ids)
group_manager.implied_ids = group_user
group_user.implied_ids = group_visitor
groups = group_visitor + group_user + group_manager
# create a user
user = self.env['res.users'].create({'name': 'foo', 'login': 'foo'})
# put user in group0, and check field value
user.write({fname: group0.id})
self.assertEqual(user.groups_id & groups, group0)
self.assertEqual(user.read([fname])[0][fname], group0.id)
# put user in group_visitor, and check field value
user.write({'group_ids': [Command.set([group_visitor.id])]})
self.assertEqual(user.group_ids & groups, group_visitor)
self.assertEqual(user.all_group_ids & groups, group_visitor)
self.assertEqual(user.read(['group_ids'])[0]['group_ids'], [group_visitor.id])
self.assertEqual(user.read(['all_group_ids'])[0]['all_group_ids'], [group_visitor.id])
# put user in group1, and check field value
user.write({fname: group1.id})
self.assertEqual(user.groups_id & groups, group0 + group1)
self.assertEqual(user.read([fname])[0][fname], group1.id)
# remove group_visitor
user.write({'group_ids': [Command.unlink(group_visitor.id)]})
self.assertEqual(user.group_ids & groups, self.env['res.groups'])
# put user in group2, and check field value
user.write({fname: group2.id})
self.assertEqual(user.groups_id & groups, groups)
self.assertEqual(user.read([fname])[0][fname], group2.id)
# put user in group_manager, and check field value
user.write({'group_ids': [Command.set([group_manager.id])]})
self.assertEqual(user.group_ids & groups, group_manager)
self.assertEqual(user.all_group_ids & groups, group_visitor + group_manager + group_user)
self.assertEqual(user.read(['group_ids'])[0]['group_ids'], [group_manager.id])
self.assertEqual(set(user.read(['all_group_ids'])[0]['all_group_ids']), set((group_visitor + group_manager + group_user).ids))
normalized_values = user._remove_reified_groups({fname: group0.id})
self.assertEqual(sorted(normalized_values['groups_id']), [(3, group1.id), (3, group2.id), (4, group0.id)])
# add user in group_user, and check field value
user.write({'group_ids': [Command.link(group_user.id)]})
self.assertEqual(user.group_ids & groups, group_manager + group_user)
self.assertEqual(user.all_group_ids & groups, group_visitor + group_manager + group_user)
self.assertEqual(set(user.read(['group_ids'])[0]['group_ids']), set((group_manager + group_user).ids))
self.assertEqual(set(user.read(['all_group_ids'])[0]['all_group_ids']), set((group_visitor + group_manager + group_user).ids))
normalized_values = user._remove_reified_groups({fname: group1.id})
self.assertEqual(sorted(normalized_values['groups_id']), [(3, group2.id), (4, group1.id)])
groups = self.env['res.groups'].search([('all_user_ids', '=', user.id)])
self.assertEqual(groups, user.all_group_ids)
normalized_values = user._remove_reified_groups({fname: group2.id})
self.assertEqual(normalized_values['groups_id'], [(4, group2.id)])
def test_read_list_with_reified_field(self):
""" Check that read_group and search_read get rid of reified fields"""
User = self.env['res.users']
fnames = ['name', 'email', 'login']
# find some reified field name
reified_fname = next(
fname
for fname in User.fields_get()
if fname.startswith(('in_group_', 'sel_groups_'))
)
# check that the reified field name is not aggregable
self.assertFalse(User.fields_get([reified_fname], ['aggregator'])[reified_fname].get('aggregator'))
# check that the reified fields are not considered invalid in search_read
# and are ignored
res_with_reified = User.search_read([], fnames + [reified_fname])
res_without_reified = User.search_read([], fnames)
self.assertEqual(res_with_reified, res_without_reified, "Reified fields should be ignored in search_read")
# Verify that the read_group is raising an error if reified field is used as groupby
with self.assertRaises(ValueError):
User.read_group([], fnames + [reified_fname], [reified_fname])
def test_reified_groups_on_change(self):
"""Test that a change on a reified fields trigger the onchange of groups_id."""
def test_implied_groups_on_change(self):
"""Test that a change on a reified fields trigger the onchange of group_ids."""
group_public = self.env.ref('base.group_public')
group_portal = self.env.ref('base.group_portal')
group_user = self.env.ref('base.group_user')
# Build the reified group field name
user_groups = group_public | group_portal | group_user
user_groups_ids = [str(group_id) for group_id in sorted(user_groups.ids)]
group_field_name = f"sel_groups_{'_'.join(user_groups_ids)}"
app = self.env['res.groups.privilege'].create({'name': 'Foo'})
group_contain_user = self.env['res.groups'].create({
'name': 'Small user group',
'privilege_id': app.id,
'implied_ids': [group_user.id],
})
# <group col="4" invisible="sel_groups_1_9_10 != 1" groups="base.group_no_one" class="o_label_nowrap">
with self.debug_mode():
user_form = Form(self.env['res.users'], view='base.view_users_form')
user_form = Form(self.env['res.users'], view='base.view_users_form')
user_form.name = "Test"
user_form.login = "Test"
self.assertFalse(user_form.share)
user_form[group_field_name] = group_portal.id
self.assertTrue(user_form.share, 'The groups_id onchange should have been triggered')
user_form['group_ids'] = group_portal
self.assertTrue(user_form.share, 'The group_ids onchange should have been triggered')
user_form[group_field_name] = group_user.id
self.assertFalse(user_form.share, 'The groups_id onchange should have been triggered')
user = user_form.save()
user_form[group_field_name] = group_public.id
self.assertTrue(user_form.share, 'The groups_id onchange should have been triggered')
# in debug mode, show the group widget for external user
def test_update_user_groups_view(self):
"""Test that the user groups view can still be built if all user type groups are share"""
self.env['res.groups'].search([
("category_id", "=", self.env.ref("base.module_category_user_type").id)
]).write({'share': True})
with self.debug_mode():
user_form = Form(user, view='base.view_users_form')
self.env['res.groups']._update_user_groups_view()
user_form['group_ids'] = group_user
self.assertFalse(user_form.share, 'The group_ids onchange should have been triggered')
@users('employee')
user_form['group_ids'] = group_public
self.assertTrue(user_form.share, 'The group_ids onchange should have been triggered')
user_form['group_ids'] = group_user
user_form['group_ids'] = group_user + group_contain_user
user_form.save()
# in debug mode, allow extra groups
with self.debug_mode():
user_form = Form(self.env['res.users'], view='base.view_users_form')
user_form.name = "Test-2"
user_form.login = "Test-2"
user_form['group_ids'] = group_portal
self.assertTrue(user_form.share)
# for portal user, the view_group_extra_ids is only show in debug mode
user_form['group_ids'] = group_portal + group_contain_user
self.assertFalse(user_form.share, 'The group_ids onchange should have been triggered')
with self.assertRaises(ValidationError, msg="The user cannot be at the same time in groups: ['Membre', 'Portal', 'Foo / Small user group']"):
user_form.save()
@users('portal_1')
@mute_logger('odoo.addons.base.models.ir_model')
def test_self_writeable_fields(self):
"""Check that a portal user:
- can write on fields in SELF_WRITEABLE_FIELDS on himself,
- cannot write on fields not in SELF_WRITEABLE_FIELDS on himself,
- and none of the above on another user than himself.
"""
self.assertIn(
"post_install",
self.test_tags,
"This test **must** be `post_install` to ensure the expected behavior despite other modules",
)
self.assertIn(
"email",
self.env['res.users'].SELF_WRITEABLE_FIELDS,
"For this test to make sense, 'email' must be in the `SELF_WRITEABLE_FIELDS`",
)
self.assertNotIn(
"login",
self.env['res.users'].SELF_WRITEABLE_FIELDS,
"For this test to make sense, 'login' must not be in the `SELF_WRITEABLE_FIELDS`",
)
me = self.env["res.users"].browse(self.env.user.id)
other = self.env["res.users"].browse(self.user_portal_2.id)
# Allow to write a field in the SELF_WRITEABLE_FIELDS
me.email = "foo@bar.com"
self.assertEqual(me.email, "foo@bar.com")
# Disallow to write a field not in the SELF_WRITEABLE_FIELDS
with self.assertRaises(AccessError):
me.login = "foo"
# Disallow to write a field in the SELF_WRITEABLE_FIELDS on another user
with self.assertRaises(AccessError):
other.email = "foo@bar.com"
# Disallow to write a field not in the SELF_WRITEABLE_FIELDS on another user
with self.assertRaises(AccessError):
other.login = "foo"
@users('user_internal')
def test_self_readable_writeable_fields_preferences_form(self):
"""Test that a field protected by a `groups='...'` with a group the user doesn't belong to
but part of the `SELF_WRITEABLE_FIELDS` is shown in the user profile preferences form and is editable"""
my_user = self.env['res.users'].browse(self.env.user.id)
self.assertIn(
'email',
'name',
my_user.SELF_WRITEABLE_FIELDS,
"This test doesn't make sense if not tested on a field part of the SELF_WRITEABLE_FIELDS"
)
self.patch(self.env.registry['res.users']._fields['email'], 'groups', 'base.group_system')
self.patch(self.env.registry['res.users']._fields['name'], 'groups', 'base.group_system')
with Form(my_user, view='base.view_users_form_simple_modif') as UserForm:
UserForm.email = "foo@bar.com"
self.assertEqual(my_user.email, "foo@bar.com")
UserForm.name = "Raoulette Poiluchette"
self.assertEqual(my_user.name, "Raoulette Poiluchette")
@warmup
def test_write_group_ids_performance(self):
contact_creation_group = self.env.ref("base.group_partner_manager")
self.assertNotIn(contact_creation_group, self.user_internal.group_ids)
@tagged('post_install', '-at_install', 'res_groups')
class TestUsersGroupWarning(TransactionCase):
@classmethod
def setUpClass(cls):
"""
These are the Groups and their Hierarchy we have Used to test Group warnings.
Category groups hierarchy:
Sales
User: All Documents
Administrator
Timesheets
User: own timesheets only
User: all timesheets
Administrator
Project
User
Administrator
Field Service
User
Administrator
Implied groups hierarchy:
Sales / Administrator
Sales / User: All Documents
Timesheets / Administrator
Timesheets / User: all timesheets
Timehseets / User: own timesheets only
Project / Administrator
Project / User
Timesheets / User: all timesheets
Field Service / Administrator
Sales / Administrator
Project / Administrator
Field Service / User
"""
super().setUpClass()
ResGroups = cls.env['res.groups']
IrModuleCategory = cls.env['ir.module.category']
categ_sales = IrModuleCategory.create({'name': 'Sales'})
categ_project = IrModuleCategory.create({'name': 'Project'})
categ_field_service = IrModuleCategory.create({'name': 'Field Service'})
categ_timesheets = IrModuleCategory.create({'name': 'Timesheets'})
# Sales
cls.group_sales_user, cls.group_sales_administrator = ResGroups.create([
{'name': 'User: All Documents', 'category_id': categ_sales.id},
{'name': 'Administrator', 'category_id': categ_sales.id},
])
cls.sales_categ_field = name_selection_groups((cls.group_sales_user | cls.group_sales_administrator).ids)
cls.group_sales_administrator.implied_ids = cls.group_sales_user
# Timesheets
cls.group_timesheets_user_own_timesheet = ResGroups.create([
{'name': 'User: own timesheets only', 'category_id': categ_timesheets.id}
])
cls.group_timesheets_user_all_timesheet = ResGroups.create([
{'name': 'User: all timesheets', 'category_id': categ_timesheets.id}
])
cls.group_timesheets_administrator = ResGroups.create([
{'name': 'Administrator', 'category_id': categ_timesheets.id}
])
cls.timesheets_categ_field = name_selection_groups((cls.group_timesheets_user_own_timesheet |
cls.group_timesheets_user_all_timesheet |
cls.group_timesheets_administrator).ids
)
cls.group_timesheets_administrator.implied_ids += cls.group_timesheets_user_all_timesheet
cls.group_timesheets_user_all_timesheet.implied_ids += cls.group_timesheets_user_own_timesheet
# Project
cls.group_project_user, cls.group_project_admnistrator = ResGroups.create([
{'name': 'User', 'category_id': categ_project.id},
{'name': 'Administrator', 'category_id': categ_project.id},
])
cls.project_categ_field = name_selection_groups((cls.group_project_user | cls.group_project_admnistrator).ids)
cls.group_project_admnistrator.implied_ids = (cls.group_project_user | cls.group_timesheets_user_all_timesheet)
# Field Service
cls.group_field_service_user, cls.group_field_service_administrator = ResGroups.create([
{'name': 'User', 'category_id': categ_field_service.id},
{'name': 'Administrator', 'category_id': categ_field_service.id},
])
cls.field_service_categ_field = name_selection_groups((cls.group_field_service_user | cls.group_field_service_administrator).ids)
cls.group_field_service_administrator.implied_ids = (cls.group_sales_administrator |
cls.group_project_admnistrator |
cls.group_field_service_user).ids
# User
cls.test_group_user = cls.env['res.users'].create({
'name': 'Test Group User',
'login': 'TestGroupUser',
'groups_id': (
cls.env.ref('base.group_user') |
cls.group_timesheets_administrator |
cls.group_field_service_administrator).ids,
})
def test_user_group_empty_group_warning(self):
""" User changes Empty Sales access from 'Sales: Administrator'. The
warning should be there since 'Sales: Administrator' is required when
user is having 'Field Service: Administrator'. When user reverts the
changes, warning should disappear. """
with Form(self.test_group_user.with_context(show_user_group_warning=True), view='base.view_users_form') as UserForm:
UserForm[self.sales_categ_field] = False
self.assertEqual(
UserForm.user_group_warning,
'Since Test Group User is a/an "Field Service: Administrator", they will at least obtain the right "Sales: Administrator"'
)
UserForm[self.sales_categ_field] = self.group_sales_administrator.id
self.assertFalse(UserForm.user_group_warning)
def test_user_group_inheritance_warning(self):
""" User changes 'Sales: User' from 'Sales: Administrator'. The warning
should be there since 'Sales: Administrator' is required when user is
having 'Field Service: Administrator'. When user reverts the changes,
warning should disappear. """
with Form(self.test_group_user.with_context(show_user_group_warning=True), view='base.view_users_form') as UserForm:
UserForm[self.sales_categ_field] = self.group_sales_user.id
self.assertEqual(
UserForm.user_group_warning,
'Since Test Group User is a/an "Field Service: Administrator", they will at least obtain the right "Sales: Administrator"'
)
UserForm[self.sales_categ_field] = self.group_sales_administrator.id
self.assertFalse(UserForm.user_group_warning)
def test_user_group_inheritance_warning_multi(self):
""" User changes 'Sales: User' from 'Sales: Administrator' and
'Project: User' from 'Project: Administrator'. The warning should
be there since 'Sales: Administrator' and 'Project: Administrator'
are required when user is havning 'Field Service: Administrator'.
When user reverts the changes For 'Sales: Administrator', warning
should disappear for Sales Access."""
with Form(self.test_group_user.with_context(show_user_group_warning=True), view='base.view_users_form') as UserForm:
UserForm[self.sales_categ_field] = self.group_sales_user.id
UserForm[self.project_categ_field] = self.group_project_user.id
self.assertTrue(
UserForm.user_group_warning,
'Since Test Group User is a/an "Field Service: Administrator", they will at least obtain the right "Sales: Administrator", Project: Administrator"',
)
UserForm[self.sales_categ_field] = self.group_sales_administrator.id
self.assertEqual(
UserForm.user_group_warning,
'Since Test Group User is a/an "Field Service: Administrator", they will at least obtain the right "Project: Administrator"'
)
def test_user_group_least_possible_inheritance_warning(self):
""" User changes 'Timesheets: User: own timesheets only ' from
'Timesheets: Administrator'. The warning should be there since
'Timesheets: User: all timesheets' is at least required when user is
having 'Project: Administrator'. When user reverts the changes For
'Timesheets: User: all timesheets', warning should disappear."""
with Form(self.test_group_user.with_context(show_user_group_warning=True), view='base.view_users_form') as UserForm:
UserForm[self.timesheets_categ_field] = self.group_timesheets_user_own_timesheet.id
self.assertEqual(
UserForm.user_group_warning,
'Since Test Group User is a/an "Project: Administrator", they will at least obtain the right "Timesheets: User: all timesheets"'
)
UserForm[self.timesheets_categ_field] = self.group_timesheets_user_all_timesheet.id
self.assertFalse(UserForm.user_group_warning)
def test_user_group_parent_inheritance_no_warning(self):
""" User changes 'Field Service: User' from 'Field Service: Administrator'.
The warning should not be there since 'Field Service: User' is not affected
by any other groups."""
with Form(self.test_group_user.with_context(show_user_group_warning=True), view='base.view_users_form') as UserForm:
UserForm[self.field_service_categ_field] = self.group_field_service_user.id
self.assertFalse(UserForm.user_group_warning)
# all modules: 23, base: 10; nightly: +1
with self.assertQueryCount(24):
self.user_internal.write({
"group_ids": [Command.link(contact_creation_group.id)],
})
class TestUsersTweaks(TransactionCase):
@ -628,7 +537,7 @@ class TestUsersIdentitycheck(HttpCase):
form.password = 'admin@odoo'
# The user clicks the button "Log out from all devices", which triggers a save then a call to the button method
user_identity_check = form.save()
action = user_identity_check.run_check()
action = user_identity_check.with_context(password=form.password).run_check()
# Test the session is no longer valid
# Invalid session -> redirected from /web to /web/login

View file

@ -146,7 +146,7 @@ class test_search(TransactionCase):
states_us = country_us.state_ids[:2]
# Create test users
u = Users.create({'name': '__search', 'login': '__search', 'groups_id': [Command.set([group_employee.id])]})
u = Users.create({'name': '__search', 'login': '__search', 'group_ids': [Command.set([group_employee.id])]})
a = Users.create({'name': '__test_A', 'login': '__test_A', 'country_id': country_be.id, 'state_id': country_be.id})
b = Users.create({'name': '__test_B', 'login': '__a_test_B', 'country_id': country_us.id, 'state_id': states_us[1].id})
c = Users.create({'name': '__test_B', 'login': '__z_test_B', 'country_id': country_us.id, 'state_id': states_us[0].id})
@ -215,7 +215,7 @@ class test_search(TransactionCase):
self.patch_order('res.partner', 'create_uid, name')
self.patch_order('res.users', 'partner_id, login desc')
kw = dict(groups_id=[Command.set([self.ref('base.group_system'),
kw = dict(group_ids=[Command.set([self.ref('base.group_system'),
self.ref('base.group_partner_manager')])])
# When creating with the superuser, the ordering by 'create_uid' will
@ -291,16 +291,12 @@ class test_search(TransactionCase):
def test_22_like_folding(self):
Model = self.env['res.country']
# there is just one query for the first search as it matches all
# the second search does not run, because the domain is False
with self.assertQueries(["""
SELECT "res_country"."id"
FROM "res_country"
WHERE TRUE
ORDER BY "res_country"."name"->>%s
""", """
SELECT "res_country"."id"
FROM "res_country"
WHERE FALSE
ORDER BY "res_country"."name"->>%s
ORDER BY "res_country"."name"->>%s, "res_country"."id"
"""]):
Model.search([('code', 'ilike', '')])
Model.search([('code', 'not ilike', '')])

View file

@ -0,0 +1,168 @@
import datetime
import hashlib
import io
import os
from unittest.mock import PropertyMock, patch
from asn1crypto import algos, cms, core, x509 as asn1x509
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.x509.oid import NameOID
from odoo.addons.base.models.res_company import ResCompany
from odoo.exceptions import UserError
from odoo.tests.common import TransactionCase
from odoo.tools.misc import file_open
from odoo.tools.pdf.signature import PdfSigner
class TestSignature(TransactionCase):
"""Tests on signature tool"""
@classmethod
def setUpClass(cls):
super(TestSignature, cls).setUpClass()
cls.private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=4096
)
cert_subject = x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, "BE"),
x509.NameAttribute(
NameOID.STATE_OR_PROVINCE_NAME, "Brabant Wallon"),
x509.NameAttribute(NameOID.LOCALITY_NAME, "Grand Rosiere"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Odoo"),
x509.NameAttribute(NameOID.COMMON_NAME, "odoo.com")
])
cls.certificate = x509.CertificateBuilder().subject_name(
cert_subject
).issuer_name(
cert_subject
).public_key(
cls.private_key.public_key()
).serial_number(
x509.random_serial_number()
).not_valid_before(
datetime.datetime.now(datetime.timezone.utc)
).not_valid_after(
datetime.datetime.now(datetime.timezone.utc) +
datetime.timedelta(days=10)
).add_extension(
x509.SubjectAlternativeName([x509.DNSName("localhost")]),
critical=False
).sign(cls.private_key, hashes.SHA256())
cls.pdf_path = "base/tests/minimal.pdf"
def test_odoo_pdf_signer(self):
fixed_time = datetime.datetime.now(datetime.timezone.utc)
with file_open(self.pdf_path, "rb") as stream:
out_stream = io.BytesIO()
with patch.object(PdfSigner, "_load_key_and_certificate", return_value=(self.private_key, self.certificate)):
signer = PdfSigner(stream, self.env, signing_time=fixed_time)
out_stream = signer.sign_pdf()
if not out_stream:
self.skipTest("Could not load the PdfSigner class properly")
pdf_data = out_stream.getvalue()
# Retrive the signature content
sig_field_index = pdf_data.rfind(b"/FT /Sig")
content_index = pdf_data.find(b"Contents", sig_field_index)
content_start_index = pdf_data.find(b"<", content_index)
content_end_index = pdf_data.find(b">", content_index)
content = pdf_data[content_start_index+1: content_end_index]
# Retrieve the computed byte range
byte_range_index = pdf_data.find(b"ByteRange")
start_bracket_index = pdf_data.find(b"[", byte_range_index)
end_bracket_index = pdf_data.find(b"]", start_bracket_index)
byte_range = pdf_data[start_bracket_index + 1: end_bracket_index].strip().split(b" ")
# Computing the hash from the resulting document
hash = hashlib.sha256()
for i in range(0, len(byte_range), 2):
hash.update(pdf_data[int(byte_range[i]):int(byte_range[i])+int(byte_range[i+1])])
result_digest = hash.digest()
cert = asn1x509.Certificate.load(
self.certificate.public_bytes(encoding=serialization.Encoding.DER))
# Setting up the content information to assert
encap_content_info = {
'content_type': 'data',
'content': None
}
attrs = cms.CMSAttributes([
cms.CMSAttribute({
'type': 'content_type',
'values': ['data']
}),
cms.CMSAttribute({
'type': 'signing_time',
'values': [cms.Time({'utc_time': core.UTCTime(fixed_time)})]
}),
cms.CMSAttribute({
'type': 'cms_algorithm_protection',
'values': [
cms.CMSAlgorithmProtection(
{
'mac_algorithm': None,
'digest_algorithm': cms.DigestAlgorithm(
{'algorithm': 'sha256', 'parameters': None}
),
'signature_algorithm': cms.SignedDigestAlgorithm({
'algorithm': 'sha256_rsa',
'parameters': None
})
}
)
]
}),
cms.CMSAttribute({
'type': 'message_digest',
'values': [result_digest],
}),
])
signed_attrs = self.private_key.sign(
attrs.dump(),
padding.PKCS1v15(),
hashes.SHA256()
)
signer_info = cms.SignerInfo({
'version': "v1",
'digest_algorithm': algos.DigestAlgorithm({'algorithm': 'sha256'}),
'signature_algorithm': algos.SignedDigestAlgorithm({'algorithm': 'sha256_rsa'}),
'signature': signed_attrs,
'sid': cms.SignerIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': cert.issuer,
'serial_number': cert.serial_number
})
}),
'signed_attrs': attrs})
signed_data = {
'version': 'v1',
'digest_algorithms': [algos.DigestAlgorithm({'algorithm': 'sha256'})],
'encap_content_info': encap_content_info,
'certificates': [cert],
'signer_infos': [signer_info]
}
content_info = cms.ContentInfo({
'content_type': 'signed_data',
'content': cms.SignedData(signed_data)
})
signature_hex = content_info.dump().hex()
signature_hex = signature_hex.ljust(8192 * 2, "0")
self.assertEqual(signature_hex.encode(), content)

View file

@ -43,6 +43,23 @@ class TestSQL(BaseCase):
with self.assertRaises(KeyError):
SQL("SELECT id FROM table WHERE foo=%(one)s AND bar=%(two)s", one=1, to=2)
def test_escape_percent(self):
sql = SQL("'%%' || %s", 'a')
self.assertEqual(sql.code, "'%%' || %s")
with self.assertRaises(TypeError):
SQL("'%'") # not enough arguments
with self.assertRaises(ValueError):
SQL("'%' || %s", 'a') # unescaped percent
with self.assertRaises(TypeError):
SQL("'%%' || %s") # not enough arguments
self.assertEqual(SQL("'foo%%'").code, "'foo%%'")
self.assertEqual(SQL("'foo%%' || %s", 'bar').code, "'foo%%' || %s")
self.assertEqual(SQL("'foo%%' || %(bar)s", bar='bar').code, "'foo%%' || %s")
self.assertEqual(SQL("%(foo)s AND bar='baz%%'", foo=SQL("qrux='%%'")).code, "qrux='%%' AND bar='baz%%'")
self.assertEqual(SQL("%(foo)s AND bar='baz%%'", foo=SQL("%s='%%s'", "qrux")).code, "%s='%%s' AND bar='baz%%'")
def test_sql_equality(self):
sql1 = SQL("SELECT id FROM table WHERE foo=%s", 42)
sql2 = SQL("SELECT id FROM table WHERE foo=%s", 42)
@ -56,17 +73,14 @@ class TestSQL(BaseCase):
sql2 = SQL("SELECT id FROM table WHERE foo=%s", 421)
self.assertNotEqual(sql1, sql2)
def test_sql_hash(self):
hash(SQL("SELECT id FROM table WHERE x=%s", 5))
def test_sql_idempotence(self):
sql1 = SQL("SELECT id FROM table WHERE foo=%s AND bar=%s", 42, 'baz')
sql2 = SQL(sql1)
self.assertEqual(sql1, sql2)
def test_sql_unpacking(self):
sql = SQL("SELECT id FROM table WHERE foo=%s AND bar=%s", 42, 'baz')
string, params = sql
self.assertEqual(string, "SELECT id FROM table WHERE foo=%s AND bar=%s")
self.assertEqual(params, [42, 'baz'])
def test_sql_join(self):
sql = SQL(" AND ").join([])
self.assertEqual(sql.code, "")
@ -161,6 +175,7 @@ class TestSQL(BaseCase):
"""SQL('SELECT "id" FROM "table" WHERE "table"."foo"=%s AND "table"."bar"=%s', 1, 2)"""
)
class TestSqlTools(TransactionCase):
def test_add_constraint(self):
@ -169,8 +184,27 @@ class TestSqlTools(TransactionCase):
# ensure the constraint with % works and it's in the DB
with self.assertRaises(CheckViolation), mute_logger('odoo.sql_db'):
self.env['res.bank'].create({'name': '10% bank'})
self.env['res.bank'].create({'name': r'10% bank'})
# ensure the definitions match
db_definition = sql.constraint_definition(self.env.cr, 'res_bank', 'test_constraint_dummy')
self.assertEqual(definition, db_definition)
self.assertEqual(db_definition, definition)
def test_add_index(self):
definition = "(name, id)"
sql.add_index(self.env.cr, 'res_bank_test_name', 'res_bank', definition, unique=False)
# check the definition
db_definition, db_comment = sql.index_definition(self.env.cr, 'res_bank_test_name')
self.assertIn(definition, db_definition)
self.assertIs(db_comment, None)
def test_add_index_escape(self):
definition = "(id) WHERE name ~ '%'"
comment = r'some%comment'
sql.add_index(self.env.cr, 'res_bank_test_percent_escape', 'res_bank', definition, unique=False, comment=comment)
# ensure the definitions match (definition is the comment if it is set)
db_definition, db_comment = sql.index_definition(self.env.cr, 'res_bank_test_percent_escape')
self.assertIn('WHERE', db_definition) # the definition is rewritten by postgres
self.assertEqual(db_comment, comment)

View file

@ -11,17 +11,17 @@ from unittest import SkipTest, skip
from unittest.mock import patch
from odoo.tests.case import TestCase
from odoo.tests.common import BaseCase, TransactionCase, users, warmup
from odoo.tests.common import BaseCase, TransactionCase, users, warmup, RegistryRLock
from odoo.tests.result import OdooTestResult
_logger = logging.getLogger(__name__)
from odoo.tests import MetaCase
# this is mainly to ensure that simple tests will continue to work even if BaseCase should be used
# this only works if doClassCleanup is available on testCase because of the vendoring of suite.py.
class TestTestSuite(TestCase, metaclass=MetaCase):
class TestTestSuite(TestCase):
test_tags = {'standard', 'at_install'}
test_module = 'base'
def test_test_suite(self):
""" Check that OdooSuite handles unittest.TestCase correctly. """
@ -93,6 +93,11 @@ class TestRunnerLoggingCommon(TransactionCase):
expected_first_frame_method = self._testMethodName
else:
expected_first_frame_method = self.expected_first_frame_methods.pop(0)
if expected_first_frame_method.endswith('_with_decorators'):
# For decorators, we don't need to have the first frame in line with
# the test name because it already appears in the stack trace.
# See odoo/odoo#108202.
return
first_frame_method = tb.tb_frame.f_code.co_name
if first_frame_method != expected_first_frame_method:
self._log_error(f"Checking first tb frame: {first_frame_method} is not equal to {expected_first_frame_method}")
@ -188,17 +193,13 @@ Exception: {message}
@users('__system__')
@warmup
def test_with_decorators(self):
# note, this test may be broken with a decorator in decorator=5.0.5 since the behaviour changed
# but decoratorx was not introduced yet.
message = (
'''ERROR: Subtest TestRunnerLogging.test_with_decorators (login='__system__')
Traceback (most recent call last):
File "<decorator-gen-xxx>", line $line, in test_with_decorators
File "/root_path/odoo/odoo/tests/common.py", line $line, in _users
func(*args, **kwargs)
File "<decorator-gen-xxx>", line $line, in test_with_decorators
File "/root_path/odoo/odoo/tests/common.py", line $line, in with_users
func(self, *args, **kwargs)
File "/root_path/odoo/odoo/tests/common.py", line $line, in warmup
func(*args, **kwargs)
func(self, *args, **kwargs)
File "/root_path/odoo/odoo/addons/base/tests/test_test_suite.py", line $line, in test_with_decorators
raise Exception('This is an error')
Exception: This is an error
@ -533,3 +534,15 @@ class TestSkipMethof(BaseCase):
@skip
def test_skip_method(self):
raise Exception('This should be skipped')
class TestRegistryRLock(BaseCase):
def test_registry_rlock_count(self):
lock = RegistryRLock()
for i in range(5):
self.assertEqual(lock.count, i)
lock.acquire()
for i in range(5):
self.assertEqual(lock.count, 5 - i)
lock.release()

View file

@ -17,7 +17,6 @@ class TestSetTags(TransactionCase):
fc = FakeClass()
self.assertTrue(hasattr(fc, 'test_tags'))
self.assertEqual(fc.test_tags, {'at_install', 'standard'})
self.assertEqual(fc.test_module, 'base')
@ -29,7 +28,6 @@ class TestSetTags(TransactionCase):
fc = FakeClass()
self.assertTrue(hasattr(fc, 'test_tags'))
self.assertEqual(fc.test_tags, {'at_install', 'standard'})
self.assertEqual(fc.test_module, 'base')
@ -165,7 +163,7 @@ class TestSelector(TransactionCase):
self.assertEqual(set(), tags.exclude)
tags = TagsSelector('/module/tests/test_file.py') # all standard test of a module
self.assertEqual({('standard', None, None, None, 'module.tests.test_file'), }, tags.include)
self.assertEqual({('standard', None, None, None, '/module/tests/test_file.py'), }, tags.include)
self.assertEqual(set(), tags.exclude)
tags = TagsSelector('*/module') # all tests of a module
@ -212,6 +210,17 @@ class TestSelector(TransactionCase):
self.assertEqual({(None, 'module', None, None, None), }, tags.include) # all in module
self.assertEqual({('standard', None, None, None, None), }, tags.exclude) # exept standard ones
tags = TagsSelector('*/some-paths/with-dash/addons/account/test/test_file.py') # a filepath with dashes
self.assertEqual({(None, None, None, None, '/some-paths/with-dash/addons/account/test/test_file.py'), }, tags.include)
tags = TagsSelector('/some/absolute/path/v.3/module.py')
self.assertEqual({('standard', None, None, None, '/some/absolute/path/v.3/module.py'), }, tags.include) # all in module
tags = TagsSelector('/some/absolute/path/v.3/module.py')
self.assertEqual({('standard', None, None, None, '/some/absolute/path/v.3/module.py'), }, tags.include) # all in module
tags = TagsSelector('/module.method')
self.assertEqual({('standard', 'module', None, 'method', None), }, tags.include) # all in module
@tagged('nodatabase')
class TestSelectorSelection(TransactionCase):
@ -355,6 +364,21 @@ class TestSelectorSelection(TransactionCase):
position = TagsSelector('post_install')
self.assertTrue(tags.check(post_install_obj) and position.check(post_install_obj))
# module part
tags = TagsSelector('/base')
self.assertTrue(tags.check(no_tags_obj), 'Test should match is module path')
tags = TagsSelector('/base/tests/test_tests_tags.py')
self.assertTrue(tags.check(no_tags_obj), 'Test should match is module path with file')
tags = TagsSelector('/account/tests/test_tests_tags.py')
self.assertFalse(tags.check(no_tags_obj), 'Test should not match another module path with file')
# absolute path case (used by test-file)
tags = TagsSelector(__file__) # todo fix if . in path
self.assertTrue(tags.check(no_tags_obj), 'Test should match its absolute file path')
tags = TagsSelector(__file__)
self.assertTrue(tags.check(no_tags_obj), 'Test should its absolute file path')
def test_selector_parser_parameters(self):
tags = ','.join([
'/base:FakeClassA[failfast=0,filter=-livechat]',

View file

@ -309,6 +309,39 @@ class TranslationToolsTestCase(BaseCase):
f'translation {invalid!r} has non-translatable elements(elements not in TRANSLATED_ELEMENTS)',
)
def test_translate_xml_fstring(self):
""" Test xml_translate() with formated string (ruby or jinja). """
terms = []
source = """<t t-name="stuff">
<t t-set="first" t-value="33"/>
<t t-set="second" t-valuef="no-translate-{{first}}"/>
<t t-set="toto" t-valuef.translate="My ro-{{first}}"/>
<span t-attf-title="Big #{toto} first {{first}} second:{{second}}"/>
<div data-stuff.translate="cat"/>
</t>"""
result = xml_translate(terms.append, source)
self.assertEqual(result, source)
self.assertItemsEqual(terms,
['My ro-{{0}}', 'Big {{0}} first {{1}} second:{{2}}', 'cat'])
# try to insert malicious expression
malicous = {
'My ro-{{0}}': 'Translated ro-{{0}} and {{1+1}}',
'Big {{0}} first {{1}} second:{{2}}': 'Big Translated {{0}} second ({{2}}) first {{1+1}}',
'cat': 'dog',
}
result = xml_translate(malicous.get, source)
self.assertEqual(result, """<t t-name="stuff">
<t t-set="first" t-value="33"/>
<t t-set="second" t-valuef="no-translate-{{first}}"/>
<t t-set="toto" t-valuef.translate="Translated ro-{{first}} and None"/>
<span t-attf-title="Big Translated #{toto} second ({{second}}) first None"/>
<div data-stuff.translate="dog"/>
</t>""")
def test_translate_html(self):
""" Test html_translate(). """
source = """<blockquote>A <h2>B</h2> C</blockquote>"""
@ -343,7 +376,7 @@ class TestLanguageInstall(TransactionCase):
def _load_module_terms(self, modules, langs, overwrite=False, imported_module=False):
loaded.append((modules, langs, overwrite))
with patch('odoo.addons.base.models.ir_module.Module._load_module_terms', _load_module_terms):
with patch('odoo.addons.base.models.ir_module.IrModuleModule._load_module_terms', _load_module_terms):
wizard.lang_install()
# _load_module_terms is called once with lang='fr_FR' and overwrite=True
@ -1134,7 +1167,7 @@ class TestXMLTranslation(TransactionCase):
</div>
<div class="s_table_of_content_main" data-name="Content">
<section class="pb16">
<h1 data-anchor="true" class="o_default_snippet_text" id="table_of_content_heading_1672668075678_4">%s</h1>
<h1 data-anchor="true" id="table_of_content_heading_1672668075678_4">%s</h1>
</section>
</div>
</form>'''
@ -1184,7 +1217,7 @@ class TestXMLTranslation(TransactionCase):
</div>
<div class="s_table_of_content_main" data-name="Content">
<section class="pb16">
<h1 data-anchor="true" class="o_default_snippet_text" id="table_of_content_heading_1672668075678_4">%s</h1>
<h1 data-anchor="true" id="table_of_content_heading_1672668075678_4">%s</h1>
</section>
</div>
</form>'''
@ -1784,7 +1817,7 @@ class TestLanguageInstallPerformance(TransactionCase):
self.assertFalse(fr_BE.active)
t0 = time.time()
fr_BE.toggle_active()
fr_BE.action_unarchive()
t1 = time.time()
_stats_logger.info("installed language fr_BE in %.3fs", t1 - t0)

View file

@ -6,7 +6,7 @@
from contextlib import contextmanager
import unittest
from odoo import api, SUPERUSER_ID
from odoo import api
from odoo.tests import common
from odoo.tests.common import BaseCase
@ -20,7 +20,7 @@ def environment():
"""
reg = Registry(common.get_db_name())
with reg.cursor() as cr:
yield api.Environment(cr, SUPERUSER_ID, {})
yield api.Environment(cr, api.SUPERUSER_ID, {})
MODULE = 'test_uninstall'

View file

@ -1,49 +0,0 @@
import subprocess as sp
import sys
from os.path import join as opj, realpath
from odoo.tools import config
from odoo.tests import BaseCase
class TestCommand(BaseCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.odoo_bin = realpath(opj(__file__, '../../../../../odoo-bin'))
def run_command(self, *args, check=True, capture_output=True, text=True, **kwargs):
return sp.run(
[
sys.executable,
self.odoo_bin,
f'--addons-path={config["addons_path"]}',
*args,
],
capture_output=capture_output,
check=check,
text=text,
**kwargs
)
def test_upgrade_code_example(self):
proc = self.run_command('upgrade_code', '--script', '17.5-00-example', '--dry-run')
self.assertFalse(proc.stdout, "there should be no file modified by the example script")
self.assertFalse(proc.stderr)
def test_upgrade_code_help(self):
proc = self.run_command('upgrade_code', '--help')
self.assertIn("usage: ", proc.stdout)
self.assertIn("Rewrite the entire source code", proc.stdout)
self.assertFalse(proc.stderr)
def test_upgrade_code_standalone(self):
from odoo.cli import upgrade_code # noqa: PLC0415
proc = sp.run(
[sys.executable, upgrade_code.__file__, '--help'],
check=True, capture_output=True, text=True
)
self.assertIn("usage: ", proc.stdout)
self.assertIn("Rewrite the entire source code", proc.stdout)
self.assertFalse(proc.stderr)

View file

@ -22,7 +22,7 @@ class TestHasGroup(TransactionCase):
'partner_id': self.env['res.partner'].create({
'name': "Strawman Test User"
}).id,
'groups_id': [Command.set([group0.id])]
'group_ids': [Command.set([group0.id])]
})
self.grp_internal_xml_id = 'base.group_user'
@ -54,7 +54,7 @@ class TestHasGroup(TransactionCase):
)
def test_other_user(self):
internal_user = self.test_user.copy({'groups_id': self.grp_internal})
internal_user = self.test_user.copy({'group_ids': self.grp_internal})
internal_user = internal_user.with_user(internal_user)
test_user = self.env['res.users'].with_user(self.test_user).browse(self.test_user.id)
@ -71,7 +71,6 @@ class TestHasGroup(TransactionCase):
who would also have group_user by implied_group.
Otherwise, it succeeds with the groups we asked for.
"""
grp_public = self.env.ref('base.group_public')
grp_test_portal_xml_id = 'test_user_has_group.portal_implied_group'
grp_test_portal = self.env['res.groups']._load_records([
dict(xml_id=grp_test_portal_xml_id, values={'name': 'Test Group Portal'})
@ -83,6 +82,7 @@ class TestHasGroup(TransactionCase):
grp_test_internal2 = self.env['res.groups']._load_records([
dict(xml_id=grp_test_internal2_xml_id, values={'name': 'Test Group Internal 2'})
])
self.grp_portal.implied_ids = grp_test_portal
grp_test_internal1.implied_ids = False
@ -91,8 +91,7 @@ class TestHasGroup(TransactionCase):
portal_user = self.env['res.users'].create({
'login': 'portalTest',
'name': 'Portal test',
'sel_groups_%s_%s_%s' % (self.grp_internal.id, self.grp_portal.id, grp_public.id): self.grp_portal.id,
'sel_groups_%s_%s' % (grp_test_internal1.id, grp_test_internal2.id): grp_test_internal2.id,
'group_ids': [self.grp_portal.id, grp_test_internal2.id],
})
self.assertTrue(
@ -121,8 +120,7 @@ class TestHasGroup(TransactionCase):
portal_user = self.env['res.users'].create({
'login': 'portalFail',
'name': 'Portal fail',
'sel_groups_%s_%s_%s' % (self.grp_internal.id, self.grp_portal.id, grp_public.id): self.grp_portal.id,
'sel_groups_%s_%s' % (grp_test_internal1.id, grp_test_internal2.id): grp_test_internal2.id,
'group_ids': [self.grp_portal.id, grp_test_internal2.id],
})
def test_portal_write(self):
@ -135,11 +133,12 @@ class TestHasGroup(TransactionCase):
portal_user = self.env['res.users'].create({
'login': 'portalTest2',
'name': 'Portal test 2',
'groups_id': [Command.set([self.grp_portal.id])],
'group_ids': [Command.set([self.grp_portal.id])],
})
self.assertEqual(portal_user.group_ids, self.grp_portal)
self.assertEqual(
portal_user.groups_id, (self.grp_portal + grp_test_portal),
portal_user.all_group_ids, (self.grp_portal + grp_test_portal),
"The portal user should have the implied group.",
)
@ -147,7 +146,7 @@ class TestHasGroup(TransactionCase):
{"name": "fail", "implied_ids": [Command.set([self.grp_internal.id])]})
with self.assertRaises(ValidationError):
portal_user.write({'groups_id': [Command.link(grp_fail.id)]})
portal_user.write({'group_ids': [Command.link(grp_fail.id)]})
def test_two_user_types(self):
#Create a user with two groups of user types kind (Internal and Portal)
@ -161,17 +160,17 @@ class TestHasGroup(TransactionCase):
self.env['res.users'].create({
'login': 'test_two_user_types',
'name': "Test User with two user types",
'groups_id': [Command.set([grp_test.id])]
'group_ids': [Command.set([grp_test.id])]
})
#Add a user with portal to the group Internal
test_user = self.env['res.users'].create({
'login': 'test_user_portal',
'name': "Test User with two user types",
'groups_id': [Command.set([self.grp_portal.id])]
'group_ids': [Command.set([self.grp_portal.id])]
})
with self.assertRaises(ValidationError):
self.grp_internal.users = [Command.link(test_user.id)]
self.grp_internal.user_ids = [Command.link(test_user.id)]
def test_two_user_types_implied_groups(self):
"""Contrarily to test_two_user_types, we simply add an implied_id to a group.
@ -185,10 +184,10 @@ class TestHasGroup(TransactionCase):
test_user = self.env['res.users'].create({
'login': 'test_user_portal',
'name': "Test User with one user types",
'groups_id': [Command.set([grp_test.id])]
'group_ids': [Command.set([grp_test.id])]
})
with self.assertRaisesRegex(ValidationError, "The user cannot have more than one user types"), self.env.cr.savepoint():
with self.assertRaises(ValidationError, msg="Test user belongs to two user types."):
grp_test.write({'implied_ids': [Command.link(self.grp_portal.id)]})
self.env["ir.model.fields"].create(
@ -196,9 +195,9 @@ class TestHasGroup(TransactionCase):
"name": "x_group_names",
"model_id": self.env.ref("base.model_res_users").id,
"state": "manual",
"field_description": "A computed field that depends on groups_id",
"compute": "for r in self: r['x_group_names'] = ', '.join(r.groups_id.mapped('name'))",
"depends": "groups_id",
"field_description": "A computed field that depends on all_group_ids",
"compute": "for r in self: r['x_group_names'] = ', '.join(r.all_group_ids.mapped('name'))",
"depends": "all_group_ids",
"store": True,
"ttype": "char",
}
@ -209,8 +208,8 @@ class TestHasGroup(TransactionCase):
"model_id": self.env.ref("base.model_res_groups").id,
"state": "manual",
"field_description": "A computed field that depends on users",
"compute": "for r in self: r['x_user_names'] = ', '.join(r.users.mapped('name'))",
"depends": "users",
"compute": "for r in self: r['x_user_names'] = ', '.join(r.all_user_ids.mapped('name'))",
"depends": "all_user_ids",
"store": True,
"ttype": "char",
}
@ -228,29 +227,44 @@ class TestHasGroup(TransactionCase):
"""
group_0 = self.env.ref(self.group0) # the group to which test_user already belongs
group_U = self.env["res.groups"].create({"name": "U", "implied_ids": [Command.set([self.grp_internal.id])]})
self.grp_internal.implied_ids = False # only there to simplify the test by not having to care about its trans_implied_ids
self.test_user.write({'groups_id': [Command.link(group_U.id)]})
self.grp_internal.implied_ids = False # only there to simplify the test
self.assertEqual(self.test_user.group_ids, group_0)
self.assertEqual(self.test_user.all_group_ids, group_0)
self.test_user.write({'group_ids': [Command.link(group_U.id)]})
self.assertEqual(
self.test_user.groups_id, (group_0 + group_U + self.grp_internal),
self.test_user.group_ids, (group_0 + group_U),
"We should have our 2 groups",
)
self.assertEqual(
self.test_user.all_group_ids, (group_0 + group_U + self.grp_internal),
"We should have our 2 groups and the implied user group",
)
with self.assertRaises(ValidationError):
# A group may be (transitively) implying group_user or a portal, then it would raise an exception
self.test_user.write({'group_ids': [
Command.unlink(self.grp_internal.id),
Command.unlink(self.grp_public.id),
Command.link(self.grp_portal.id),
]})
# Now we demote him. The JS framework sends 3 and 4 commands,
# which is what we write here, but it should work even with a 5 command or whatever.
self.test_user.write({'groups_id': [
Command.unlink(self.grp_internal.id),
self.test_user.write({'group_ids': [
Command.unlink(group_U.id),
Command.unlink(self.grp_public.id),
Command.link(self.grp_portal.id),
]})
# if we screw up the removing groups/adding the implied ids, we could end up in two situations:
# 1. we have a portal user with way too much rights (e.g. 'Contact Creation', which does not imply any other group)
# 2. because a group may be (transitively) implying group_user, then it would raise an exception
# so as a compromise we remove all groups when demoting a user
# (even technical display groups, e.g. TaxB2B, which could be re-added later)
# 2. a group may be (transitively) implying group_user or a portal, then it would raise an exception
self.assertEqual(
self.test_user.groups_id, (self.grp_portal),
self.test_user.all_group_ids, (group_0 + self.grp_portal),
"Here the portal group does not imply any other group, so we should only have this group.",
)
@ -274,26 +288,31 @@ class TestHasGroup(TransactionCase):
# as well as 'implied_groups'; otherwise nothing else should happen.
# By contrast, for a portal user we want implied groups not to be added
# if and only if it would not give group_user (or group_public) privileges
user_a = U.create({"name": "a", "login": "a", "groups_id": [Command.set([group_AA.id, group_user.id])]})
self.assertEqual(user_a.groups_id, (group_AA + group_A + group_user + group_no_one))
user_a = U.create({"name": "a", "login": "a", "group_ids": [Command.set([group_AA.id, group_user.id])]})
self.assertEqual(user_a.all_group_ids, (group_AA + group_A + group_user + group_no_one))
self.assertEqual(user_a.group_ids, (group_AA + group_user))
user_b = U.create({"name": "b", "login": "b", "groups_id": [Command.set([group_portal.id, group_AA.id])]})
self.assertEqual(user_b.groups_id, (group_AA + group_A + group_portal))
user_b = U.create({"name": "b", "login": "b", "group_ids": [Command.set([group_portal.id, group_AA.id])]})
self.assertEqual(user_b.all_group_ids, (group_AA + group_A + group_portal))
self.assertEqual(user_b.group_ids, (group_AA + group_portal))
# user_b is not an internal user, but giving it a new group just added a new group
(user_a + user_b).write({"groups_id": [Command.link(group_BB.id)]})
self.assertEqual(user_a.groups_id, (group_AA + group_A + group_BB + group_B + group_user + group_no_one))
self.assertEqual(user_b.groups_id, (group_AA + group_A + group_BB + group_B + group_portal))
(user_a + user_b).write({"group_ids": [Command.link(group_BB.id)]})
self.assertEqual(user_a.all_group_ids, (group_AA + group_A + group_BB + group_B + group_user + group_no_one))
self.assertEqual(user_b.all_group_ids, (group_AA + group_A + group_BB + group_B + group_portal))
self.assertEqual(user_a.group_ids, (group_AA + group_BB + group_user))
self.assertEqual(user_b.group_ids, (group_AA + group_BB + group_portal))
# now we create a group that implies the group_user
# adding it to a user should work normally, whereas adding it to a portal user should raise
group_C = G.create({"name": "C", "implied_ids": [Command.set([group_user.id])]})
user_a.write({"groups_id": [Command.link(group_C.id)]})
self.assertEqual(user_a.groups_id, (group_AA + group_A + group_BB + group_B + group_C + group_user + group_no_one))
user_a.write({"group_ids": [Command.link(group_C.id)]})
self.assertEqual(user_a.all_group_ids, (group_AA + group_A + group_BB + group_B + group_C + group_user + group_no_one))
self.assertEqual(user_a.group_ids, (group_AA + group_BB + group_C + group_user))
with self.assertRaises(ValidationError):
user_b.write({"groups_id": [Command.link(group_C.id)]})
user_b.write({"group_ids": [Command.link(group_C.id)]})
def test_has_group_cleared_cache_on_write(self):
self.env.registry.clear_cache()

View file

@ -1,275 +0,0 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import collections
import datetime
import time
from odoo.exceptions import AccessDenied, AccessError
from odoo.http import _request_stack
import odoo
import odoo.tools
from odoo.tests import common
from odoo.service import common as auth, model
from odoo.tools import DotDict
from odoo.api import call_kw
@common.tagged('post_install', '-at_install')
class TestXMLRPC(common.HttpCase):
    """Exercise the external RPC entry points (XML-RPC ``common`` and
    ``object`` services, plus the ``/jsonrpc`` route) end to end, checking
    both dispatch and marshalling of Python values over the wire."""

    def setUp(self):
        # zero-argument super(): file already uses modern syntax (f-strings)
        super().setUp()
        self.admin_uid = self.env.ref('base.user_admin').id

    def xmlrpc(self, model, method, *args, **kwargs):
        """Run ``model.method(*args, **kwargs)`` over XML-RPC as the admin user.

        :return: whatever ``execute_kw`` marshals back from the server.
        """
        return self.xmlrpc_object.execute_kw(
            common.get_db_name(), self.admin_uid, 'admin',
            model, method, args, kwargs
        )

    def test_01_xmlrpc_login(self):
        """ Try to login on the common service. """
        db_name = common.get_db_name()
        uid = self.xmlrpc_common.login(db_name, 'admin', 'admin')
        self.assertEqual(uid, self.admin_uid)

    def test_xmlrpc_ir_model_search(self):
        """ Try a search on the object service. """
        o = self.xmlrpc_object
        db_name = common.get_db_name()
        # both calling conventions must work: with and without a kwargs dict
        ids = o.execute(db_name, self.admin_uid, 'admin', 'ir.model', 'search', [])
        self.assertIsInstance(ids, list)
        ids = o.execute(db_name, self.admin_uid, 'admin', 'ir.model', 'search', [], {})
        self.assertIsInstance(ids, list)

    def test_xmlrpc_datetime(self):
        """ Test that native datetime can be sent over xmlrpc
        """
        # grant (create, read) access on res.device.log so the RPC calls pass ACLs
        m = self.env.ref('base.model_res_device_log')
        self.env['ir.model.access'].create({
            'name': "w/e",
            'model_id': m.id,
            'perm_read': True,
            'perm_create': True,
        })
        now = datetime.datetime.now()
        ids = self.xmlrpc(
            'res.device.log', 'create',
            {'session_identifier': "abc", 'first_activity': now}
        )
        [r] = self.xmlrpc(
            'res.device.log', 'read',
            ids, ['first_activity'],
        )
        # datetimes come back as "YYYY-MM-DD HH:MM:SS" strings
        self.assertEqual(r['first_activity'], now.isoformat(" ", "seconds"))

    def test_xmlrpc_read_group(self):
        """Smoke test: read_group over XML-RPC must marshal without error."""
        # the return value itself is not inspected; the call succeeding is the test
        self.xmlrpc_object.execute(
            common.get_db_name(), self.admin_uid, 'admin',
            'res.partner', 'read_group', [], ['is_company', 'color'], ['parent_id']
        )

    def test_xmlrpc_name_search(self):
        """Smoke test: name_search over XML-RPC must marshal without error."""
        self.xmlrpc_object.execute(
            common.get_db_name(), self.admin_uid, 'admin',
            'res.partner', 'name_search', "admin"
        )

    def test_xmlrpc_html_field(self):
        """HTML field values must round-trip through XML-RPC unchanged."""
        sig = '<p>bork bork bork <span style="font-weight: bork">bork</span><br></p>'
        r = self.env['res.users'].create({
            'name': 'bob',
            'login': 'bob',
            'signature': sig
        })
        self.assertEqual(str(r.signature), sig)
        [x] = self.xmlrpc('res.users', 'read', r.id, ['signature'])
        self.assertEqual(x['signature'], sig)

    def test_xmlrpc_frozendict_marshalling(self):
        """ Test that the marshalling of a frozendict object works properly over XMLRPC """
        self.env.ref('base.user_admin').tz = "Europe/Brussels"
        ctx = self.xmlrpc_object.execute(
            common.get_db_name(), self.admin_uid, 'admin',
            'res.users', 'context_get',
        )
        self.assertEqual(ctx['lang'], 'en_US')
        self.assertEqual(ctx['tz'], 'Europe/Brussels')

    def test_xmlrpc_defaultdict_marshalling(self):
        """
        Test that the marshalling of a collections.defaultdict object
        works properly over XMLRPC
        """
        self.patch(self.registry['res.users'], 'context_get',
                   odoo.api.model(lambda *_: collections.defaultdict(int)))
        self.assertEqual(self.xmlrpc('res.users', 'context_get'), {})

    def test_xmlrpc_remove_control_characters(self):
        """Control characters are kept in the DB but stripped when marshalled out."""
        record = self.env['res.users'].create({
            'name': 'bob with a control character: \x03',
            'login': 'bob',
        })
        # stored value keeps the control character...
        self.assertEqual(record.name, 'bob with a control character: \x03')
        # ...but the XML-RPC layer strips it, as XML 1.0 cannot carry it
        [record_data] = self.xmlrpc('res.users', 'read', record.id, ['name'])
        self.assertEqual(record_data['name'], 'bob with a control character: ')

    def test_jsonrpc_read_group(self):
        """Smoke test: read_group through the /jsonrpc endpoint."""
        self._json_call(
            common.get_db_name(), self.admin_uid, 'admin',
            'res.partner', 'read_group', [], ['is_company', 'color'], ['parent_id']
        )

    def test_jsonrpc_name_search(self):
        """Smoke test: name_search through the /jsonrpc endpoint."""
        self._json_call(
            common.get_db_name(),
            self.admin_uid, 'admin',
            'res.partner', 'name_search', 'admin'
        )

    def _json_call(self, *args):
        """POST a JSON-RPC 2.0 ``object.execute`` call with *args* as payload."""
        self.opener.post(f"{self.base_url()}/jsonrpc", json={
            'jsonrpc': '2.0',
            'id': None,
            'method': 'call',
            'params': {
                'service': 'object',
                'method': 'execute',
                'args': args
            }
        })

    def test_xmlrpc_attachment_raw(self):
        """Binary ``raw`` data is decoded to text and stripped of control chars on read."""
        ids = self.env['ir.attachment'].create({'name': 'n', 'raw': b'\x01\x09'}).ids
        [att] = self.xmlrpc_object.execute(
            common.get_db_name(), self.admin_uid, 'admin',
            'ir.attachment', 'read', ids, ['raw'])
        self.assertEqual(att['raw'], '\t',
            "on read, binary data should be decoded as a string and stripped from control character")
# HttpCase is used here mainly for its test cursor, not for HTTP itself
@common.tagged('post_install', '-at_install')
class TestAPIKeys(common.HttpCase):
    """Authentication via password and via API keys on the RPC services,
    including key creation, deletion rights and deactivated users."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._user = cls.env['res.users'].create({
            'name': "Bylan",
            'login': 'byl',
            'password': 'ananananan',
            'tz': 'Australia/Eucla',
        })

    def setUp(self):
        super().setUp()

        def get_json_data():
            raise ValueError("There is no json here")

        # needs a fake request in order to call methods protected with check_identity
        fake_req = DotDict({
            # various things go and access request items
            'httprequest': DotDict({
                'environ': {'REMOTE_ADDR': 'localhost'},
                'cookies': {},
                'args': {},
            }),
            'cookies': {},
            # bypass check_identity flow
            'session': {'identity-check-last': time.time()},
            'geoip': {},
            'get_json_data': get_json_data,
        })
        _request_stack.push(fake_req)
        self.addCleanup(_request_stack.pop)

    def test_trivial(self):
        """Password authentication works on both auth and model dispatch."""
        uid = auth.dispatch('authenticate', [self.env.cr.dbname, 'byl', 'ananananan', {}])
        self.assertEqual(uid, self._user.id)
        ctx = model.dispatch('execute_kw', [
            self.env.cr.dbname, uid, 'ananananan',
            'res.users', 'context_get', []
        ])
        self.assertEqual(ctx['tz'], 'Australia/Eucla')

    def test_wrongpw(self):
        # User.authenticate raises but RPC.authenticate returns False
        uid = auth.dispatch('authenticate', [self.env.cr.dbname, 'byl', 'aws', {}])
        self.assertFalse(uid)
        with self.assertRaises(AccessDenied):
            model.dispatch('execute_kw', [
                self.env.cr.dbname, self._user.id, 'aws',
                'res.users', 'context_get', []
            ])

    def test_key(self):
        """A generated API key authenticates like a password on every service."""
        env = self.env(user=self._user)
        r = env['res.users.apikeys.description'].create({
            'name': 'a',
        }).make_key()
        k = r['context']['default_key']
        # the real password still works...
        uid = auth.dispatch('authenticate', [self.env.cr.dbname, 'byl', 'ananananan', {}])
        self.assertEqual(uid, self._user.id)
        # ...and so does the freshly generated key
        uid = auth.dispatch('authenticate', [self.env.cr.dbname, 'byl', k, {}])
        self.assertEqual(uid, self._user.id)
        ctx = model.dispatch('execute_kw', [
            self.env.cr.dbname, uid, k,
            'res.users', 'context_get', []
        ])
        self.assertEqual(ctx['tz'], 'Australia/Eucla')
        api_key = call_kw(
            model=self.env['res.users.apikeys.description'],
            name='create',
            args=[{'name': 'Name of the key'}],
            kwargs={}
        )
        self.assertTrue(isinstance(api_key, int))

    def test_delete(self):
        """Key removal rights: owner yes, admin yes, unrelated user no."""
        env = self.env(user=self._user)
        env['res.users.apikeys.description'].create({'name': 'b',}).make_key()
        env['res.users.apikeys.description'].create({'name': 'b',}).make_key()
        env['res.users.apikeys.description'].create({'name': 'b',}).make_key()
        k0, k1, k2 = env['res.users.apikeys'].search([])

        # user can remove their own keys
        k0.remove()
        self.assertFalse(k0.exists())

        # admin can remove user keys (PEP 8: no space before call parentheses)
        k1.with_user(self.env.ref('base.user_admin')).remove()
        self.assertFalse(k1.exists())

        # other user can't remove user keys
        u = self.env['res.users'].create({
            'name': 'a',
            'login': 'a',
            'groups_id': self.env.ref('base.group_user').ids,
        })
        with self.assertRaises(AccessError):
            k2.with_user(u).remove()

    def test_disabled(self):
        """Neither password nor API key may authenticate a deactivated user."""
        env = self.env(user=self._user)
        k = env['res.users.apikeys.description'].create({'name': 'b',}).make_key()['context']['default_key']
        self._user.active = False
        with self.assertRaises(AccessDenied):
            model.dispatch('execute_kw', [
                self.env.cr.dbname, self._user.id, 'ananananan',
                'res.users', 'context_get', []
            ])
        with self.assertRaises(AccessDenied):
            model.dispatch('execute_kw', [
                self.env.cr.dbname, self._user.id, k,
                'res.users', 'context_get', []
            ])