mirror of
https://github.com/bringout/oca-technical.git
synced 2026-04-19 17:32:02 +02:00
Initial commit: OCA Technical packages (595 packages)
This commit is contained in:
commit
2cc02aac6e
24950 changed files with 2318079 additions and 0 deletions
|
|
@ -0,0 +1,10 @@
|
|||
from . import test_runner_channels
|
||||
from . import test_runner_runner
|
||||
from . import test_delayable
|
||||
from . import test_delayable_split
|
||||
from . import test_json_field
|
||||
from . import test_model_job_channel
|
||||
from . import test_model_job_function
|
||||
from . import test_queue_job_protected_write
|
||||
from . import test_wizards
|
||||
from . import test_requeue_dead_job
|
||||
458
odoo-bringout-oca-queue-queue_job/queue_job/tests/common.py
Normal file
458
odoo-bringout-oca-queue-queue_job/queue_job/tests/common.py
Normal file
|
|
@ -0,0 +1,458 @@
|
|||
# Copyright 2019 Camptocamp
|
||||
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
|
||||
import doctest
|
||||
import logging
|
||||
import sys
|
||||
import typing
|
||||
from contextlib import contextmanager
|
||||
from itertools import groupby
|
||||
from operator import attrgetter
|
||||
from unittest import TestCase, mock
|
||||
|
||||
from odoo.addons.queue_job.delay import Graph
|
||||
|
||||
# pylint: disable=odoo-addons-relative-import
|
||||
from odoo.addons.queue_job.job import Job
|
||||
|
||||
|
||||
@contextmanager
def trap_jobs():
    """Context Manager used to test enqueuing of jobs

    Trapping jobs allows to split the tests in:

    * the part that delays the job with the expected arguments in one test
    * the execution of the job itself in a second test

    When the jobs are trapped, they are not executed at all, however, we
    can verify they have been enqueued with the correct arguments and
    properties.

    Then in a second test, we can call the job method directly with the
    arguments to test.

    The context manager yields a instance of ``JobsTrap``, which provides
    utilities and assert methods.

    Example of method to test::

        def button_that_uses_delayable_chain(self):
            delayables = chain(
                self.delayable(
                    channel="root.test",
                    description="Test",
                    eta=15,
                    identity_key=identity_exact,
                    max_retries=1,
                    priority=15,
                ).testing_method(1, foo=2),
                self.delayable().testing_method('x', foo='y'),
                self.delayable().no_description(),
            )
            delayables.delay()

    Example of usage in a test::

        with trap_jobs() as trap:
            self.env['test.queue.job'].button_that_uses_delayable_chain()

            trap.assert_jobs_count(3)
            trap.assert_jobs_count(
                2, only=self.env['test.queue.job'].testing_method
            )
            trap.assert_jobs_count(
                1, only=self.env['test.queue.job'].no_description
            )

            trap.assert_enqueued_job(
                self.env['test.queue.job'].testing_method,
                args=(1,),
                kwargs={"foo": 2},
                properties=dict(
                    channel="root.test",
                    description="Test",
                    eta=15,
                    identity_key=identity_exact,
                    max_retries=1,
                    priority=15,
                )
            )
            trap.assert_enqueued_job(
                self.env['test.queue.job'].testing_method,
                args=("x",),
                kwargs={"foo": "y"},
            )
            trap.assert_enqueued_job(
                self.env['test.queue.job'].no_description,
            )

            # optionally, you can perform the jobs synchronously (without
            # going to the database)
            trap.perform_enqueued_jobs()
    """
    with mock.patch(
        "odoo.addons.queue_job.delay.Job",
        name="Job Class",
        # NOTE(review): ``mock.patch`` has no ``auto_spec`` keyword (the real
        # one is ``autospec``); this extra kwarg merely sets an attribute on
        # the mock. Kept as-is to avoid changing runtime behavior — confirm
        # upstream intent before "fixing" it.
        auto_spec=True,
        unsafe=True,
    ) as job_cls_mock:
        with JobsTrap(job_cls_mock) as trap:
            yield trap
|
||||
|
||||
|
||||
class JobCall(typing.NamedTuple):
    """One recorded "enqueue a job" call: method, arguments and properties."""

    method: typing.Callable
    args: tuple
    kwargs: dict
    properties: dict

    def __eq__(self, other):
        # Two bound methods built from the same function and recordset are
        # distinct objects, so compare their __self__/__func__ explicitly
        # instead of relying on default tuple equality.
        if not isinstance(other, JobCall):
            return NotImplemented
        same_method = (
            self.method.__self__ == other.method.__self__
            and self.method.__func__ == other.method.__func__
        )
        if not same_method:
            return False
        return (self.args, self.kwargs, self.properties) == (
            other.args,
            other.kwargs,
            other.properties,
        )
|
||||
|
||||
|
||||
class JobsTrap:
    """Used by ``trap_jobs()``, provide assert methods on the trapped jobs

    Look the documentation of ``trap_jobs()`` for a usage example.

    The ``store`` method of the Job instances is mocked so they are never
    saved in database.

    Helpers for tests:

    * ``jobs_count``
    * ``assert_jobs_count``
    * ``assert_enqueued_job``
    * ``perform_enqueued_jobs``

    You can also access the list of calls that were made to enqueue the jobs
    in the ``calls`` attribute, and the generated jobs in the
    ``enqueued_jobs``.
    """

    def __init__(self, job_mock):
        self.job_mock = job_mock
        self.job_mock.side_effect = self._add_job
        # 1 call == 1 job, they share the same position in the lists
        self.calls = []
        self.enqueued_jobs = []
        self._store_patchers = []
        # borrowed only for its assert* helpers
        self._test_case = TestCase()

    def jobs_count(self, only=None):
        """Return the count of enqueued jobs

        ``only`` is an optional job method on which the count is filtered
        """
        jobs = self._filtered_enqueued_jobs(only) if only else self.enqueued_jobs
        return len(jobs)

    def assert_jobs_count(self, expected, only=None):
        """Raise an assertion error if the count of enqueued jobs differs

        ``only`` is an optional job method on which the count is filtered
        """
        actual = self.jobs_count(only=only)
        self._test_case.assertEqual(actual, expected)

    def assert_enqueued_job(self, method, args=None, kwargs=None, properties=None):
        """Raise an assertion error if the expected method has not been enqueued

        * ``method`` is the method (as method object) delayed as job
        * ``args`` is a tuple of arguments passed to the job method
        * ``kwargs`` is a dict of keyword arguments passed to the job method
        * ``properties`` is a dict of job properties (priority, eta, ...)

        The args and the kwargs *must* match exactly what has been enqueued
        in the job method. The properties are optional: if the job has been
        enqueued with a custom description but the assert method is not
        called with ``description`` in the properties, it still matches the
        call. However, if a ``description`` is passed in the assert's
        properties, it must match.
        """
        properties = {} if properties is None else properties
        args = () if args is None else args
        kwargs = {} if kwargs is None else kwargs
        expected_call = JobCall(
            method=method,
            args=args,
            kwargs=kwargs,
            properties=properties,
        )
        # compare against copies of the recorded calls keeping only the
        # properties the caller asked us to check
        actual_calls = [
            JobCall(
                method=recorded.method,
                args=recorded.args,
                kwargs=recorded.kwargs,
                properties={
                    key: value
                    for key, value in recorded.properties.items()
                    if key in properties
                },
            )
            for recorded in self.calls
        ]

        if expected_call not in actual_calls:
            formatted_actual = "\n".join(
                " * %s" % (self._format_job_call(recorded),)
                for recorded in actual_calls
            )
            raise AssertionError(
                "Job %s was not enqueued.\n"
                "Actual enqueued jobs:\n%s"
                % (self._format_job_call(expected_call), formatted_actual)
            )

    def perform_enqueued_jobs(self):
        """Perform the enqueued jobs synchronously"""

        def graph_key(job):
            return job.graph_uuid or ""

        # groupby requires its input sorted on the same key
        ordered = sorted(self.enqueued_jobs, key=graph_key)
        self.enqueued_jobs = []
        for graph_uuid, jobs in groupby(ordered, key=graph_key):
            if graph_uuid:
                self._perform_graph_jobs(jobs)
            else:
                self._perform_single_jobs(jobs)

    def _perform_single_jobs(self, jobs):
        # we probably don't want to replicate a perfect order here, but at
        # least respect the priority
        for job in sorted(jobs, key=attrgetter("priority")):
            job.perform()

    def _perform_graph_jobs(self, jobs):
        # rebuild the dependency graph, then execute in topological order
        graph = Graph()
        for job in jobs:
            graph.add_vertex(job)
            for parent in job.depends_on:
                graph.add_edge(parent, job)

        for job in graph.topological_sort():
            job.perform()

    def _add_job(self, *args, **kwargs):
        # side effect of the mocked Job class: record the call and keep the
        # real Job instance (with its ``store`` patched out)
        job = Job(*args, **kwargs)
        is_duplicate = job.identity_key and any(
            enqueued.identity_key == job.identity_key
            for enqueued in self.enqueued_jobs
        )
        if not is_duplicate:
            self.enqueued_jobs.append(job)

        patcher = mock.patch.object(job, "store")
        self._store_patchers.append(patcher)
        patcher.start()

        job_args = kwargs.pop("args", None) or ()
        job_kwargs = kwargs.pop("kwargs", None) or {}
        self.calls.append(
            JobCall(
                method=args[0],
                args=job_args,
                kwargs=job_kwargs,
                properties=kwargs,
            )
        )
        return job

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        for patcher in self._store_patchers:
            patcher.stop()

    def _filtered_enqueued_jobs(self, job_method):
        def same_method(job):
            return (
                job.func.__self__ == job_method.__self__
                and job.func.__func__ == job_method.__func__
            )

        return [job for job in self.enqueued_jobs if same_method(job)]

    def _format_job_call(self, call):
        arg_parts = []
        if call.args:
            arg_parts.append(", ".join("%s" % (arg,) for arg in call.args))
        if call.kwargs:
            arg_parts.append(
                ", ".join("%s=%s" % (key, value) for key, value in call.kwargs.items())
            )
        formatted_properties = ", ".join(
            "%s=%s" % (key, value) for key, value in call.properties.items()
        )
        return "<%s>.%s(%s) with properties (%s)" % (
            call.method.__self__,
            call.method.__name__,
            ", ".join(arg_parts),
            formatted_properties,
        )

    def __repr__(self):
        return repr(self.calls)
|
||||
|
||||
|
||||
class JobCounter:
    """Snapshot the existing ``queue.job`` records to count new ones later."""

    def __init__(self, env):
        super().__init__()
        self.env = env
        # records existing at instantiation time: everything found later
        # that is not in this set was created afterwards
        self.existing = self.search_all()

    def search_all(self):
        """Return every ``queue.job`` record currently in the database."""
        return self.env["queue.job"].search([])

    def search_created(self):
        """Return the records created since this counter was instantiated."""
        return self.search_all() - self.existing

    def count_all(self):
        """Number of ``queue.job`` records currently in the database."""
        return len(self.search_all())

    def count_created(self):
        """Number of records created since this counter was instantiated."""
        return len(self.search_created())

    def count_existing(self):
        """Number of records that existed when this counter was created."""
        return len(self.existing)
|
||||
|
||||
|
||||
class JobMixin:
    """Helpers mixed into test cases that create and run queue jobs."""

    def job_counter(self):
        """Return a ``JobCounter`` bound to the test environment."""
        return JobCounter(self.env)

    def perform_jobs(self, jobs):
        """Load and perform every job created since the ``jobs`` snapshot."""
        created = jobs.search_created()
        for record in created:
            Job.load(self.env, record.uuid).perform()

    @contextmanager
    def trap_jobs(self):
        """Method flavor of the module-level ``trap_jobs`` context manager."""
        # resolves to the module-level trap_jobs() function, not this method
        with trap_jobs() as trap:
            yield trap
|
||||
|
||||
|
||||
@contextmanager
def mock_with_delay():
    """Context Manager mocking ``with_delay()``

    DEPRECATED: use ``trap_jobs()``.

    Mocking this method means we can decorrelate the tests in:

    * the part that delay the job with the expected arguments
    * the execution of the job itself

    The first kind of test does not need to actually create the jobs in the
    database, as we can inspect how the Mocks were called.

    The second kind of test calls directly the method decorated by ``@job``
    with the arguments that we want to test.

    The context manager returns 2 mocks:
    * the first allow to check that with_delay() was called and with which
      arguments
    * the second to check which job method was called and with which
      arguments.

    Example of test::

        def test_export(self):
            with mock_with_delay() as (delayable_cls, delayable):
                # inside this method, there is a call
                # partner.with_delay(priority=15).export_record('test')
                self.record.run_export()

                # check 'with_delay()' part:
                self.assertEqual(delayable_cls.call_count, 1)
                # arguments passed in 'with_delay()'
                delay_args, delay_kwargs = delayable_cls.call_args
                self.assertEqual(
                    delay_args, (self.env['res.partner'],)
                )
                self.assertDictEqual(delay_kwargs, {"priority": 15})

                # check what's passed to the job method 'export_record'
                self.assertEqual(delayable.export_record.call_count, 1)
                delay_args, delay_kwargs = delayable.export_record.call_args
                self.assertEqual(delay_args, ('test',))
                self.assertDictEqual(delay_kwargs, {})

    An example of the first kind of test:
    https://github.com/camptocamp/connector-jira/blob/0ca4261b3920d5e8c2ae4bb0fc352ea3f6e9d2cd/connector_jira/tests/test_batch_timestamp_import.py#L43-L76 # noqa
    And the second kind:
    https://github.com/camptocamp/connector-jira/blob/0ca4261b3920d5e8c2ae4bb0fc352ea3f6e9d2cd/connector_jira/tests/test_import_task.py#L34-L46 # noqa

    """
    with mock.patch(
        "odoo.addons.queue_job.models.base.DelayableRecordset",
        name="DelayableRecordset",
        spec=True,
    ) as delayable_cls:
        # prepare the mocks
        delayable = mock.MagicMock(name="DelayableBinding")
        delayable_cls.return_value = delayable
        yield delayable_cls, delayable
|
||||
|
||||
|
||||
class OdooDocTestCase(doctest.DocTestCase):
|
||||
"""
|
||||
We need a custom DocTestCase class in order to:
|
||||
- define test_tags to run as part of standard tests
|
||||
- output a more meaningful test name than default "DocTestCase.runTest"
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, doctest, optionflags=0, setUp=None, tearDown=None, checker=None, seq=0
|
||||
):
|
||||
super().__init__(
|
||||
doctest._dt_test,
|
||||
optionflags=optionflags,
|
||||
setUp=setUp,
|
||||
tearDown=tearDown,
|
||||
checker=checker,
|
||||
)
|
||||
self.test_sequence = seq
|
||||
|
||||
def setUp(self):
|
||||
"""Log an extra statement which test is started."""
|
||||
super().setUp()
|
||||
logging.getLogger(__name__).info("Running tests for %s", self._dt_test.name)
|
||||
|
||||
|
||||
def load_doctests(module):
    """
    Generates a tests loading method for the doctests of the given module
    https://docs.python.org/3/library/unittest.html#load-tests-protocol
    """

    def load_tests(loader, tests, ignore):
        """
        Apply the 'test_tags' attribute to each DocTestCase found by the
        DocTestSuite.  Also extend the DocTestCase class trivially to fit the
        class teardown that Odoo backported for its own test classes from
        Python 3.8.
        """
        if sys.version_info < (3, 8):
            # provide the class-cleanup hooks Odoo expects on older pythons
            doctest.DocTestCase.doClassCleanups = lambda: None
            doctest.DocTestCase.tearDown_exceptions = []

        suite = doctest.DocTestSuite(module)
        for idx, test in enumerate(suite):
            odoo_test = OdooDocTestCase(test, seq=idx)
            odoo_test.test_tags = {"standard", "at_install", "queue_job", "doctest"}
            tests.addTest(odoo_test)

        return tests

    return load_tests
|
||||
|
|
@ -0,0 +1,287 @@
|
|||
# Copyright 2019 Camptocamp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
|
||||
|
||||
import gc
|
||||
import logging
|
||||
from unittest import mock
|
||||
|
||||
from odoo.tests import common
|
||||
|
||||
from odoo.addons.queue_job.delay import Delayable, DelayableGraph
|
||||
|
||||
|
||||
class TestDelayable(common.BaseCase):
    """Unit tests for ``Delayable`` and ``DelayableGraph``."""

    def setUp(self):
        super().setUp()
        self.recordset = mock.MagicMock(name="recordset")

    def test_delayable_set(self):
        # Use gc for garbage collection and use assertLogs to suppress WARNING
        with self.assertLogs("odoo.addons.queue_job.delay", level=logging.WARNING):
            dl = Delayable(self.recordset)
            dl.set(priority=15)
            self.assertEqual(dl.priority, 15)
            dl.set({"priority": 20, "description": "test"})
            self.assertEqual(dl.priority, 20)
            self.assertEqual(dl.description, "test")
            del dl
            gc.collect()

    def test_delayable_set_unknown(self):
        # Use gc for garbage collection and use assertLogs to suppress WARNING
        with self.assertLogs("odoo.addons.queue_job.delay", level=logging.WARNING):
            dl = Delayable(self.recordset)
            with self.assertRaises(ValueError):
                dl.set(foo=15)
            del dl
            gc.collect()

    def test_graph_add_vertex_edge(self):
        graph = DelayableGraph()
        graph.add_vertex("a")
        self.assertEqual(graph._graph, {"a": set()})
        graph.add_edge("a", "b")
        self.assertEqual(graph._graph, {"a": {"b"}, "b": set()})
        graph.add_edge("b", "c")
        self.assertEqual(graph._graph, {"a": {"b"}, "b": {"c"}, "c": set()})

    def test_graph_vertices(self):
        graph = DelayableGraph({"a": {"b"}, "b": {"c"}, "c": set()})
        self.assertEqual(graph.vertices(), {"a", "b", "c"})

    def test_graph_edges(self):
        graph = DelayableGraph(
            {"a": {"b"}, "b": {"c", "d"}, "c": {"e"}, "d": set(), "e": set()}
        )
        self.assertEqual(
            sorted(graph.edges()),
            sorted(
                [
                    ("a", "b"),
                    ("b", "c"),
                    ("b", "d"),
                    ("c", "e"),
                ]
            ),
        )

    def test_graph_connect(self):
        # Use gc for garbage collection and use assertLogs to suppress WARNING
        with self.assertLogs("odoo.addons.queue_job.delay", level=logging.WARNING):
            node_tail = Delayable(self.recordset)
            node_tail2 = Delayable(self.recordset)
            node_middle = Delayable(self.recordset)
            node_top = Delayable(self.recordset)
            node_middle.on_done(node_tail)
            node_middle.on_done(node_tail2)
            node_top.on_done(node_middle)
            collected = node_top._graph._connect_graphs()
            self.assertEqual(
                collected._graph,
                {
                    node_tail: set(),
                    node_tail2: set(),
                    node_middle: {node_tail, node_tail2},
                    node_top: {node_middle},
                },
            )

            del node_tail, node_tail2, node_middle, node_top, collected
            gc.collect()

    def test_graph_paths(self):
        graph = DelayableGraph(
            {"a": {"b"}, "b": {"c", "d"}, "c": {"e"}, "d": set(), "e": set()}
        )
        paths = list(graph.paths("a"))
        self.assertEqual(sorted(paths), sorted([["a", "b", "d"], ["a", "b", "c", "e"]]))
        paths = list(graph.paths("b"))
        self.assertEqual(sorted(paths), sorted([["b", "d"], ["b", "c", "e"]]))
        paths = list(graph.paths("c"))
        self.assertEqual(paths, [["c", "e"]])
        paths = list(graph.paths("d"))
        self.assertEqual(paths, [["d"]])
        paths = list(graph.paths("e"))
        self.assertEqual(paths, [["e"]])

    def test_graph_repr(self):
        graph = DelayableGraph(
            {"a": {"b"}, "b": {"c", "d"}, "c": {"e"}, "d": set(), "e": set()}
        )
        actual = repr(graph)
        expected = ["'a' → 'b' → 'c' → 'e'", "'a' → 'b' → 'd'"]
        self.assertEqual(sorted(actual.split("\n")), expected)

    def test_graph_topological_sort(self):
        # the graph is an example from
        # https://en.wikipedia.org/wiki/Topological_sorting
        # if you want a visual representation
        dependencies = {
            5: {11},
            7: {11, 8},
            3: {8, 10},
            11: {2, 9, 10},
            2: set(),
            8: {9},
            9: set(),
            10: set(),
        }
        graph = DelayableGraph(dependencies)

        result = list(graph.topological_sort())
        # Many orderings are valid. Instead of enumerating the ~150
        # acceptable permutations (unmaintainable, easy to get wrong),
        # verify the two defining properties of a topological sort:
        # every vertex appears exactly once, and every vertex comes
        # before each of its successors.
        self.assertEqual(sorted(result), sorted(dependencies))
        position = {vertex: index for index, vertex in enumerate(result)}
        for vertex, successors in dependencies.items():
            for successor in successors:
                self.assertLess(position[vertex], position[successor])
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
# Copyright 2024 Akretion (http://www.akretion.com).
|
||||
# @author Florian Mounier <florian.mounier@akretion.com>
|
||||
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
|
||||
|
||||
from odoo.tests import common
|
||||
|
||||
from odoo.addons.queue_job.delay import Delayable
|
||||
|
||||
|
||||
class TestDelayableSplit(common.BaseCase):
    """Tests for ``Delayable.split()`` chunking behavior."""

    def setUp(self):
        super().setUp()

        class FakeRecordSet(list):
            """Minimal list-based stand-in for an Odoo recordset."""

            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self._name = "recordset"

            def __getitem__(self, key):
                # slices must stay FakeRecordSet so split() chunks keep
                # the recordset interface
                if isinstance(key, slice):
                    return FakeRecordSet(super().__getitem__(key))
                return super().__getitem__(key)

            def method(self, arg, kwarg=None):
                """Method to be called"""
                return arg, kwarg

        self.FakeRecordSet = FakeRecordSet

    def _sorted_delayables(self, group):
        """Return the group's delayables ordered by their description."""
        return sorted(group._delayables, key=lambda delayable: delayable.description)

    def test_delayable_split_no_method_call_beforehand(self):
        dl = Delayable(self.FakeRecordSet(range(20)))
        # split() is only valid after the job method has been chosen
        with self.assertRaises(ValueError):
            dl.split(3)

    def test_delayable_split_10_3(self):
        dl = Delayable(self.FakeRecordSet(range(10)))
        dl.method("arg", kwarg="kwarg")
        group = dl.split(3)
        self.assertEqual(len(group._delayables), 4)
        delayables = self._sorted_delayables(group)
        expected_chunks = [
            self.FakeRecordSet([0, 1, 2]),
            self.FakeRecordSet([3, 4, 5]),
            self.FakeRecordSet([6, 7, 8]),
            self.FakeRecordSet([9]),
        ]
        for index, delayable in enumerate(delayables):
            self.assertEqual(delayable.recordset, expected_chunks[index])
            self.assertEqual(
                delayable.description,
                "Method to be called (split %d/4)" % (index + 1),
            )
            # each chunk gets its own bound method, but on the same function
            self.assertNotEqual(delayable._job_method, dl._job_method)
            self.assertEqual(
                delayable._job_method.__name__, dl._job_method.__name__
            )
            self.assertEqual(delayable._job_args, ("arg",))
            self.assertEqual(delayable._job_kwargs, {"kwarg": "kwarg"})

    def test_delayable_split_10_5(self):
        dl = Delayable(self.FakeRecordSet(range(10)))
        dl.method("arg", kwarg="kwarg")
        group = dl.split(5)
        self.assertEqual(len(group._delayables), 2)
        delayables = self._sorted_delayables(group)
        self.assertEqual(delayables[0].recordset, self.FakeRecordSet([0, 1, 2, 3, 4]))
        self.assertEqual(delayables[1].recordset, self.FakeRecordSet([5, 6, 7, 8, 9]))
        self.assertEqual(delayables[0].description, "Method to be called (split 1/2)")
        self.assertEqual(delayables[1].description, "Method to be called (split 2/2)")

    def test_delayable_split_10_10(self):
        dl = Delayable(self.FakeRecordSet(range(10)))
        dl.method("arg", kwarg="kwarg")
        group = dl.split(10)
        self.assertEqual(len(group._delayables), 1)
        delayables = self._sorted_delayables(group)
        self.assertEqual(delayables[0].recordset, self.FakeRecordSet(range(10)))
        self.assertEqual(delayables[0].description, "Method to be called (split 1/1)")

    def test_delayable_split_10_20(self):
        # chunk size larger than the recordset: a single chunk
        dl = Delayable(self.FakeRecordSet(range(10)))
        dl.method("arg", kwarg="kwarg")
        group = dl.split(20)
        self.assertEqual(len(group._delayables), 1)
        delayables = self._sorted_delayables(group)
        self.assertEqual(delayables[0].recordset, self.FakeRecordSet(range(10)))
        self.assertEqual(delayables[0].description, "Method to be called (split 1/1)")
|
||||
|
|
@ -0,0 +1,154 @@
|
|||
# Copyright 2016 Camptocamp
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
|
||||
|
||||
import json
|
||||
from datetime import date, datetime
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from odoo.tests import common
|
||||
|
||||
# pylint: disable=odoo-addons-relative-import
|
||||
# we are testing, we want to test as we were an external consumer of the API
|
||||
from odoo.addons.queue_job.fields import JobDecoder, JobEncoder
|
||||
|
||||
|
||||
class TestJson(common.TransactionCase):
    """Round-trip tests for the queue_job JSON encoder/decoder.

    Verifies that recordsets, datetimes, dates and lxml elements are
    serialized to the expected tagged-dict JSON form and deserialized
    back to equivalent Python values.
    """

    def _demo_setup(self):
        """Return (demo user, its context, main partner in that env)."""
        demo_user = self.env.ref("base.user_demo")
        context = demo_user.context_get()
        partner = self.env(user=demo_user, context=context).ref("base.main_partner")
        return demo_user, context, partner

    def test_encoder_recordset(self):
        demo_user, context, partner = self._demo_setup()
        encoded = json.dumps(partner, cls=JobEncoder)
        # the encoder stores the uid separately, not inside the context
        ctx_no_uid = dict(context)
        ctx_no_uid.pop("uid")
        self.assertEqual(
            json.loads(encoded),
            {
                "uid": demo_user.id,
                "_type": "odoo_recordset",
                "model": "res.partner",
                "ids": [partner.id],
                "su": False,
                "context": ctx_no_uid,
            },
        )

    def test_encoder_recordset_list(self):
        demo_user, context, partner = self._demo_setup()
        encoded = json.dumps(["a", 1, partner], cls=JobEncoder)
        ctx_no_uid = dict(context)
        ctx_no_uid.pop("uid")
        self.assertEqual(
            json.loads(encoded),
            [
                "a",
                1,
                {
                    "uid": demo_user.id,
                    "_type": "odoo_recordset",
                    "model": "res.partner",
                    "ids": [partner.id],
                    "su": False,
                    "context": ctx_no_uid,
                },
            ],
        )

    def test_decoder_recordset(self):
        demo_user, context, partner = self._demo_setup()
        payload = (
            '{"_type": "odoo_recordset",'
            '"model": "res.partner",'
            '"su": false,'
            '"ids": [%s],"uid": %s, '
            '"context": {"tz": "%s", "lang": "%s"}}'
            % (partner.id, demo_user.id, context["tz"], context["lang"])
        )
        decoded = json.loads(payload, cls=JobDecoder, env=self.env)
        self.assertEqual(decoded, partner)
        self.assertEqual(demo_user, partner.env.user)

    def test_decoder_recordset_list(self):
        demo_user, context, partner = self._demo_setup()
        payload = (
            '["a", 1, '
            '{"_type": "odoo_recordset",'
            '"model": "res.partner",'
            '"su": false,'
            '"ids": [%s],"uid": %s, '
            '"context": {"tz": "%s", "lang": "%s"}}]'
            % (partner.id, demo_user.id, context["tz"], context["lang"])
        )
        expected = ["a", 1, partner]
        decoded = json.loads(payload, cls=JobDecoder, env=self.env)
        self.assertEqual(decoded, expected)
        self.assertEqual(demo_user, expected[2].env.user)

    def test_decoder_recordset_list_without_user(self):
        # without a "uid" key the decoder falls back to the current env user
        payload = (
            '["a", 1, {"_type": "odoo_recordset",' '"model": "res.users", "ids": [1]}]'
        )
        decoded = json.loads(payload, cls=JobDecoder, env=self.env)
        self.assertEqual(decoded, ["a", 1, self.env.ref("base.user_root")])

    def test_encoder_datetime(self):
        encoded = json.dumps(
            ["a", 1, datetime(2017, 4, 19, 8, 48, 50, 1)], cls=JobEncoder
        )
        self.assertEqual(
            json.loads(encoded),
            [
                "a",
                1,
                {"_type": "datetime_isoformat", "value": "2017-04-19T08:48:50.000001"},
            ],
        )

    def test_decoder_datetime(self):
        payload = (
            '["a", 1, {"_type": "datetime_isoformat",'
            '"value": "2017-04-19T08:48:50.000001"}]'
        )
        decoded = json.loads(payload, cls=JobDecoder, env=self.env)
        self.assertEqual(decoded, ["a", 1, datetime(2017, 4, 19, 8, 48, 50, 1)])

    def test_encoder_date(self):
        encoded = json.dumps(["a", 1, date(2017, 4, 19)], cls=JobEncoder)
        self.assertEqual(
            json.loads(encoded),
            ["a", 1, {"_type": "date_isoformat", "value": "2017-04-19"}],
        )

    def test_decoder_date(self):
        payload = '["a", 1, {"_type": "date_isoformat",' '"value": "2017-04-19"}]'
        decoded = json.loads(payload, cls=JobDecoder, env=self.env)
        self.assertEqual(decoded, ["a", 1, date(2017, 4, 19)])

    def test_encoder_etree(self):
        root = etree.Element("root", attr="val")
        root.append(etree.Element("child", attr="val"))
        encoded = json.dumps(["a", 1, root], cls=JobEncoder)
        self.assertEqual(
            json.loads(encoded),
            [
                "a",
                1,
                {
                    "_type": "etree_element",
                    "value": '<root attr="val"><child attr="val"/></root>',
                },
            ],
        )

    def test_decoder_etree(self):
        payload = (
            '["a", 1, {"_type": "etree_element", "value": '
            '"<root attr=\\"val\\"><child attr=\\"val\\"/></root>"}]'
        )
        root = etree.Element("root", attr="val")
        root.append(etree.Element("child", attr="val"))
        decoded = json.loads(payload, cls=JobDecoder, env=self.env)
        # Element objects have no value equality; compare serialized forms.
        decoded[2] = etree.tostring(decoded[2])
        self.assertEqual(decoded, ["a", 1, etree.tostring(root)])
||||
|
|
@ -0,0 +1,59 @@
|
|||
# copyright 2018 Camptocamp
|
||||
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
|
||||
|
||||
from psycopg2 import IntegrityError
|
||||
|
||||
import odoo
|
||||
from odoo.tests import common
|
||||
|
||||
|
||||
class TestJobChannel(common.TransactionCase):
    """Tests for the ``queue.job.channel`` model."""

    def setUp(self):
        super().setUp()
        self.Channel = self.env["queue.job.channel"]
        self.root_channel = self.Channel.search([("name", "=", "root")])

    def test_channel_new(self):
        # An in-memory (unsaved) channel has neither name nor complete name.
        channel = self.Channel.new()
        self.assertFalse(channel.name)
        self.assertFalse(channel.complete_name)

    def test_channel_create(self):
        # The complete name is the dot-joined path from the root channel.
        channel = self.Channel.create(
            {"name": "test", "parent_id": self.root_channel.id}
        )
        self.assertEqual(channel.name, "test")
        self.assertEqual(channel.complete_name, "root.test")
        channel2 = self.Channel.create({"name": "test", "parent_id": channel.id})
        self.assertEqual(channel2.name, "test")
        self.assertEqual(channel2.complete_name, "root.test.test")

    @odoo.tools.mute_logger("odoo.sql_db")
    def test_channel_complete_name_uniq(self):
        """Two channels with the same complete name violate the SQL constraint."""
        channel = self.Channel.create(
            {"name": "test", "parent_id": self.root_channel.id}
        )
        self.assertEqual(channel.name, "test")
        self.assertEqual(channel.complete_name, "root.test")

        self.Channel.create({"name": "test", "parent_id": self.root_channel.id})

        # Flush processes all the pending recomputations (or at least the
        # given field) and flushes the pending updates to the database.
        # It is normally called on commit.

        # The context manager 'with self.assertRaises(IntegrityError)' is
        # purposefully not used here: it calls flush_all() itself, so the
        # exception would be raised before our own 'self.env.flush_all()'
        # line. We trigger the flush manually and check for IntegrityError.
        try:
            self.env.flush_all()
        except IntegrityError as ex:
            self.assertIn("queue_job_channel_name_uniq", ex.pgerror)
        else:
            # fix: was 'self.assertEqual(True, False)', which fails with an
            # unhelpful "True != False" message; self.fail() states the intent
            self.fail("expected IntegrityError for duplicate complete name")

    def test_channel_name_get(self):
        channel = self.Channel.create(
            {"name": "test", "parent_id": self.root_channel.id}
        )
        self.assertEqual(channel.name_get(), [(channel.id, "root.test")])
||||
|
|
@ -0,0 +1,57 @@
|
|||
# copyright 2020 Camptocamp
|
||||
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
|
||||
|
||||
from odoo import exceptions
|
||||
from odoo.tests import common
|
||||
|
||||
|
||||
class TestJobFunction(common.TransactionCase):
    """Tests for the ``queue.job.function`` model."""

    def test_function_name_compute(self):
        # the name is computed from model + method as "<model>.method"
        func = self.env["queue.job.function"].create(
            {"model_id": self.env.ref("base.model_res_users").id, "method": "read"}
        )
        self.assertEqual(func.name, "<res.users>.read")

    def test_function_name_inverse(self):
        # setting the name fills model_id and method back in
        func = self.env["queue.job.function"].create({"name": "<res.users>.read"})
        self.assertEqual(func.model_id.model, "res.users")
        self.assertEqual(func.method, "read")

    def test_function_name_inverse_invalid_regex(self):
        # a malformed name (missing closing ">") is rejected
        with self.assertRaises(exceptions.UserError):
            self.env["queue.job.function"].create({"name": "<res.users.read"})

    def test_function_name_inverse_model_not_found(self):
        # a name referencing an unknown model is rejected
        with self.assertRaises(exceptions.UserError):
            self.env["queue.job.function"].create(
                {"name": "<this.model.does.not.exist>.read"}
            )

    def test_function_job_config(self):
        """job_config() reflects the stored channel, retry pattern and
        related-action configuration of the function record."""
        channel = self.env["queue.job.channel"].create(
            {"name": "foo", "parent_id": self.env.ref("queue_job.channel_root").id}
        )
        job_function = self.env["queue.job.function"].create(
            {
                "model_id": self.env.ref("base.model_res_users").id,
                "method": "read",
                "channel_id": channel.id,
                "edit_retry_pattern": "{1: 2, 3: 4}",
                "edit_related_action": (
                    '{"enable": True,'
                    ' "func_name": "related_action_foo",'
                    ' "kwargs": {"b": 1}}'
                ),
            }
        )
        config = self.env["queue.job.function"].job_config("<res.users>.read")
        self.assertEqual(
            config,
            self.env["queue.job.function"].JobConfig(
                channel="root.foo",
                retry_pattern={1: 2, 3: 4},
                related_action_enable=True,
                related_action_func_name="related_action_foo",
                related_action_kwargs={"b": 1},
                job_function_id=job_function.id,
            ),
        )
||||
|
|
@ -0,0 +1,25 @@
|
|||
# copyright 2020 Camptocamp
|
||||
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
|
||||
|
||||
from odoo import exceptions
|
||||
from odoo.tests import common
|
||||
|
||||
|
||||
class TestJobWriteProtected(common.TransactionCase):
    """``queue.job`` records must not be created or edited directly."""

    def test_create_error(self):
        # a direct create() without the edit sentinel is forbidden
        with self.assertRaises(exceptions.AccessError):
            self.env["queue.job"].create(
                {"uuid": "test", "model_name": "res.partner", "method_name": "write"}
            )

    def test_write_protected_field_error(self):
        # protected fields such as method_name cannot be rewritten
        delayed = self.env["res.partner"].with_delay().create({"name": "test"})
        record = delayed.db_record()
        with self.assertRaises(exceptions.AccessError):
            record.method_name = "unlink"

    def test_write_allow_no_protected_field_error(self):
        # non-protected fields such as priority remain writable
        delayed = self.env["res.partner"].with_delay().create({"name": "test"})
        record = delayed.db_record()
        record.priority = 30
        self.assertEqual(record.priority, 30)
||||
|
|
@ -0,0 +1,133 @@
|
|||
# Copyright 2025 ACSONE SA/NV
|
||||
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
|
||||
from contextlib import closing
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from odoo.tests.common import TransactionCase
|
||||
|
||||
from odoo.addons.queue_job.job import Job
|
||||
from odoo.addons.queue_job.jobrunner.runner import Database
|
||||
|
||||
|
||||
class TestRequeueDeadJob(TransactionCase):
    """Tests for the dead-job detection machinery (``queue_job_lock`` rows).

    These tests commit the test cursor on purpose (a second cursor must be
    able to observe the lock rows), so each committing test restores the
    TransactionCase savepoint at the end to keep the teardown working.
    """

    def create_dummy_job(self, uuid):
        """
        Create dummy job for tests

        The ``_job_edit_sentinel`` context key is required because direct
        creation of ``queue.job`` records is otherwise forbidden.
        """
        return (
            self.env["queue.job"]
            .with_context(
                _job_edit_sentinel=self.env["queue.job"].EDIT_SENTINEL,
            )
            .create(
                {
                    "uuid": uuid,
                    "user_id": self.env.user.id,
                    "state": "pending",
                    "model_name": "queue.job",
                    "method_name": "write",
                }
            )
        )

    def get_locks(self, uuid, cr=None):
        """
        Retrieve lock rows

        Returns the ``queue_job_lock`` rows for the job with the given uuid
        that are NOT currently row-locked by another transaction
        (``FOR UPDATE SKIP LOCKED`` silently skips locked rows, so an empty
        result means the row exists but is held by someone else — or does
        not exist at all).
        """
        if cr is None:
            cr = self.env.cr

        cr.execute(
            """
            SELECT
                queue_job_id
            FROM
                queue_job_lock
            WHERE
                queue_job_id IN (
                    SELECT
                        id
                    FROM
                        queue_job
                    WHERE
                        uuid = %s
                )
            FOR UPDATE SKIP LOCKED
            """,
            [uuid],
        )

        return cr.fetchall()

    def test_add_lock_record(self):
        """Starting a job inserts a matching row in queue_job_lock."""
        queue_job = self.create_dummy_job("test_add_lock")
        job_obj = Job.load(self.env, queue_job.uuid)

        job_obj.set_started()
        self.assertEqual(job_obj.state, "started")

        locks = self.get_locks(job_obj.uuid)

        self.assertEqual(1, len(locks))

    def test_lock(self):
        """lock() row-locks the queue_job_lock row so other cursors skip it."""
        queue_job = self.create_dummy_job("test_lock")
        job_obj = Job.load(self.env, queue_job.uuid)

        job_obj.set_started()
        job_obj.store()

        # from our own cursor the lock row is visible and lockable
        locks = self.get_locks(job_obj.uuid)

        self.assertEqual(1, len(locks))

        # commit to update queue_job records in DB
        self.env.cr.commit()  # pylint: disable=E8102

        job_obj.lock()

        with closing(self.env.registry.cursor()) as new_cr:
            locks = self.get_locks(job_obj.uuid, new_cr)

        # Row should be locked (SKIP LOCKED hides it from the second cursor)
        self.assertEqual(0, len(locks))

        # clean up
        queue_job.unlink()

        self.env.cr.commit()  # pylint: disable=E8102

        # because we committed the cursor, the savepoint of the test method is
        # gone, and this would break TransactionCase cleanups
        self.cr.execute("SAVEPOINT test_%d" % self._savepoint_id)

    def test_requeue_dead_jobs(self):
        """A started job with no live lock holder is picked up for requeue."""
        uuid = "test_requeue_dead_jobs"

        queue_job = self.create_dummy_job(uuid)
        job_obj = Job.load(self.env, queue_job.uuid)

        job_obj.set_enqueued()
        # simulate enqueuing was in the past
        job_obj.date_enqueued = datetime.now() - timedelta(minutes=1)
        job_obj.set_started()

        job_obj.store()
        self.env.cr.commit()  # pylint: disable=E8102

        # requeue dead jobs using current cursor
        query = Database(self.env.cr.dbname)._query_requeue_dead_jobs()
        self.env.cr.execute(query)

        uuids_requeued = self.env.cr.fetchall()

        self.assertEqual(len(uuids_requeued), 1)
        self.assertEqual(uuids_requeued[0][0], uuid)

        # clean up
        queue_job.unlink()
        self.env.cr.commit()  # pylint: disable=E8102

        # because we committed the cursor, the savepoint of the test method is
        # gone, and this would break TransactionCase cleanups
        self.cr.execute("SAVEPOINT test_%d" % self._savepoint_id)
||||
|
|
@ -0,0 +1,10 @@
|
|||
# Copyright 2015-2016 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
"""Expose the doctests of the jobrunner ``channels`` module to unittest."""

# pylint: disable=odoo-addons-relative-import
# we are testing, we want to test as we were an external consumer of the API
from odoo.addons.queue_job.jobrunner import channels

from .common import load_doctests

# unittest's load_tests protocol: the runner calls this hook to collect
# the doctests embedded in the channels module
load_tests = load_doctests(channels)
||||
|
|
@ -0,0 +1,10 @@
|
|||
# Copyright 2015-2016 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
"""Expose the doctests of the jobrunner ``runner`` module to unittest."""

# pylint: disable=odoo-addons-relative-import
# we are testing, we want to test as we were an external consumer of the API
from odoo.addons.queue_job.jobrunner import runner

from .common import load_doctests

# unittest's load_tests protocol: the runner calls this hook to collect
# the doctests embedded in the runner module
load_tests = load_doctests(runner)
||||
|
|
@ -0,0 +1,48 @@
|
|||
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
|
||||
from odoo.tests import common
|
||||
|
||||
|
||||
class TestWizards(common.TransactionCase):
    """Smoke tests for the requeue / cancel / set-done job wizards."""

    def setUp(self):
        super().setUp()
        # Creating queue.job records directly requires the edit sentinel.
        Job = self.env["queue.job"].with_context(
            _job_edit_sentinel=self.env["queue.job"].EDIT_SENTINEL,
        )
        self.job = Job.create(
            {
                "uuid": "test",
                "user_id": self.env.user.id,
                "state": "failed",
                "model_name": "queue.job",
                "method_name": "write",
                "args": (),
            }
        )

    def _wizard(self, model_name):
        """Instantiate the wizard *model_name* acting on ``self.job``."""
        ctx = {"active_model": self.job._name, "active_ids": self.job.ids}
        return self.env[model_name].with_context(**ctx).create({})

    def test_01_requeue(self):
        wizard = self._wizard("queue.requeue.job")
        wizard.requeue()
        self.assertEqual(self.job.state, "pending")

    def test_02_cancel(self):
        wizard = self._wizard("queue.jobs.to.cancelled")
        wizard.set_cancelled()
        self.assertEqual(self.job.state, "cancelled")

    def test_03_done(self):
        wizard = self._wizard("queue.jobs.to.done")
        wizard.set_done()
        self.assertEqual(self.job.state, "done")
||||
Loading…
Add table
Add a link
Reference in a new issue