19.0 vanilla

This commit is contained in:
Ernad Husremovic 2026-03-09 09:30:27 +01:00
parent d1963a3c3a
commit 2d3ee4855a
7430 changed files with 2687981 additions and 2965473 deletions

View file

@ -2,14 +2,19 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import common
from . import test_ir_http
from . import test_survey
from . import test_survey_controller
from . import test_survey_flow
from . import test_survey_flow_with_conditions
from . import test_certification_flow
from . import test_survey_invite
from . import test_survey_security
from . import test_survey_randomize
from . import test_survey_ui_backend
from . import test_survey_ui_certification
from . import test_survey_ui_feedback
from . import test_survey_compute_pages_questions
from . import test_certification_badge
from . import test_survey_performance
from . import test_survey_results

View file

@ -1,28 +1,32 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
import re
from collections import Counter
from contextlib import contextmanager
from contextlib import nullcontext
from odoo.addons.mail.tests.common import mail_new_test_user
from odoo.tests import common
class SurveyCase(common.TransactionCase):
def setUp(self):
super(SurveyCase, self).setUp()
@classmethod
def setUpClass(cls):
super(SurveyCase, cls).setUpClass()
""" Some custom stuff to make the matching between questions and answers
:param dict _type_match: dict
key: question type
value: (answer type, answer field_name)
"""
self._type_match = {
cls._type_match = {
'text_box': ('text_box', 'value_text_box'),
'char_box': ('char_box', 'value_char_box'),
'numerical_box': ('numerical_box', 'value_numerical_box'),
'scale': ('scale', 'value_scale'), # similar to numerical_box
'date': ('date', 'value_date'),
'datetime': ('datetime', 'value_datetime'),
'simple_choice': ('suggestion', 'suggested_answer_id'), # TDE: still unclear
@ -160,16 +164,18 @@ class SurveyCase(common.TransactionCase):
def _access_start(self, survey):
return self.url_open('/survey/start/%s' % survey.access_token)
def _access_page(self, survey, token):
return self.url_open('/survey/%s/%s' % (survey.access_token, token))
def _access_page(self, survey, token, query_count=None):
with self.assertQueryCount(query_count) if query_count else nullcontext():
return self.url_open('/survey/%s/%s' % (survey.access_token, token))
def _access_begin(self, survey, token):
url = survey.get_base_url() + '/survey/begin/%s/%s' % (survey.access_token, token)
return self.opener.post(url=url, json={})
url = survey.get_base_url() + f'/survey/begin/{survey.access_token}/{token}'
return self.url_open(url, json={'params': {'lang_code': 'en_US'}})
def _access_submit(self, survey, token, post_data):
def _access_submit(self, survey, token, post_data, query_count=None):
url = survey.get_base_url() + '/survey/submit/%s/%s' % (survey.access_token, token)
return self.opener.post(url=url, json={'params': post_data})
with self.assertQueryCount(query_count) if query_count else nullcontext():
return self.url_open(url, json={'params': post_data})
def _find_csrf_token(self, text):
csrf_token_re = re.compile("(input.+csrf_token.+value=\")([a-f0-9]{40}o[0-9]*)", re.MULTILINE)
@ -192,14 +198,15 @@ class SurveyCase(common.TransactionCase):
post_data[question.id] = str(values)
return post_data
def _answer_question(self, question, answer, answer_token, csrf_token, button_submit='next'):
def _answer_question(self, question, answer, answer_token, csrf_token, button_submit='next',
submit_query_count=None, access_page_query_count=None):
# Employee submits the question answer
post_data = self._format_submission_data(question, answer, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': button_submit})
response = self._access_submit(question.survey_id, answer_token, post_data)
response = self._access_submit(question.survey_id, answer_token, post_data, query_count=submit_query_count)
self.assertResponse(response, 200)
# Employee is redirected on next question
response = self._access_page(question.survey_id, answer_token)
response = self._access_page(question.survey_id, answer_token, query_count=access_page_query_count)
self.assertResponse(response, 200)
def _answer_page(self, page, answers, answer_token, csrf_token):
@ -236,11 +243,11 @@ class SurveyCase(common.TransactionCase):
False
)
self.assertTrue(bool(question_data))
self.assertEqual(len(question_data.get('answer_input_skipped_ids')), 1)
self.assertEqual(len(question_data.get('answer_input_ids') - question_data.get('answer_input_done_ids')), 1)
def _create_one_question_per_type(self):
all_questions = self.env['survey.question']
for (question_type, dummy) in self.env['survey.question']._fields['question_type'].selection:
for (question_type, _dummy) in self.env['survey.question']._fields['question_type'].selection:
kwargs = {}
if question_type == 'multiple_choice':
kwargs['labels'] = [{'value': 'MChoice0'}, {'value': 'MChoice1'}]
@ -253,64 +260,184 @@ class SurveyCase(common.TransactionCase):
return all_questions
    def _create_one_question_per_type_with_scoring(self):
        """ Create one question per available ``question_type`` on ``self.page_0``,
        attaching scoring configuration for the types that support it (numerical,
        date, datetime, choices). Matrix questions get rows/columns but no score.

        :return: recordset of all created ``survey.question`` records
        """
        all_questions = self.env['survey.question']
        # selection entries are (value, label) pairs; the label is unused here
        for (question_type, _dummy) in self.env['survey.question']._fields['question_type'].selection:
            kwargs = {}
            kwargs['question_type'] = question_type
            if question_type == 'numerical_box':
                kwargs['answer_score'] = 1
                kwargs['answer_numerical_box'] = 5
            elif question_type == 'date':
                kwargs['answer_score'] = 2
                kwargs['answer_date'] = datetime.date(2023, 10, 16)
            elif question_type == 'datetime':
                kwargs['answer_score'] = 3
                kwargs['answer_datetime'] = datetime.datetime(2023, 11, 17, 8, 0, 0)
            elif question_type == 'multiple_choice':
                # two correct answers out of three -> allows partial scoring checks
                kwargs['answer_score'] = 4
                kwargs['labels'] = [
                    {'value': 'MChoice0', 'is_correct': True},
                    {'value': 'MChoice1', 'is_correct': True},
                    {'value': 'MChoice2'}
                ]
            elif question_type == 'simple_choice':
                kwargs['answer_score'] = 5
                kwargs['labels'] = [
                    {'value': 'SChoice0', 'is_correct': True},
                    {'value': 'SChoice1'}
                ]
            elif question_type == 'matrix':
                kwargs['labels'] = [{'value': 'Column0'}, {'value': 'Column1'}]
                kwargs['labels_2'] = [{'value': 'Row0'}, {'value': 'Row1'}]
            all_questions |= self._add_question(self.page_0, 'Q0', question_type, **kwargs)
        return all_questions
class TestSurveyCommon(SurveyCase):
def setUp(self):
super(TestSurveyCommon, self).setUp()
@classmethod
def setUpClass(cls):
super().setUpClass()
""" Create test data: a survey with some pre-defined questions and various test users for ACL """
self.survey_manager = mail_new_test_user(
self.env, name='Gustave Doré', login='survey_manager', email='survey.manager@example.com',
groups='survey.group_survey_manager,base.group_user'
cls.survey_manager = mail_new_test_user(
cls.env, name='Gustave Doré', login='survey_manager', email='survey.manager@example.com',
groups='survey.group_survey_manager,base.group_user', tz='Europe/Brussels',
)
self.survey_user = mail_new_test_user(
self.env, name='Lukas Peeters', login='survey_user', email='survey.user@example.com',
cls.survey_user = mail_new_test_user(
cls.env, name='Lukas Peeters', login='survey_user', email='survey.user@example.com',
groups='survey.group_survey_user,base.group_user'
)
self.user_emp = mail_new_test_user(
self.env, name='Eglantine Employee', login='user_emp', email='employee@example.com',
cls.user_emp = mail_new_test_user(
cls.env, name='Eglantine Employee', login='user_emp', email='employee@example.com',
groups='base.group_user', password='user_emp'
)
self.user_portal = mail_new_test_user(
self.env, name='Patrick Portal', login='user_portal', email='portal@example.com',
cls.user_portal = mail_new_test_user(
cls.env, name='Patrick Portal', login='user_portal', email='portal@example.com',
groups='base.group_portal'
)
self.user_public = mail_new_test_user(
self.env, name='Pauline Public', login='user_public', email='public@example.com',
cls.user_public = mail_new_test_user(
cls.env, name='Pauline Public', login='user_public', email='public@example.com',
groups='base.group_public'
)
self.customer = self.env['res.partner'].create({
cls.customer = cls.env['res.partner'].create({
'name': 'Caroline Customer',
'email': 'customer@example.com',
})
self.survey = self.env['survey.survey'].with_user(self.survey_manager).create({
cls.survey = cls.env['survey.survey'].with_user(cls.survey_manager).create({
'title': 'Test Survey',
'access_mode': 'public',
'users_login_required': True,
'users_can_go_back': False,
})
self.page_0 = self.env['survey.question'].with_user(self.survey_manager).create({
cls.page_0 = cls.env['survey.question'].with_user(cls.survey_manager).create({
'title': 'First page',
'survey_id': self.survey.id,
'survey_id': cls.survey.id,
'sequence': 1,
'is_page': True,
'question_type': False,
})
self.question_ft = self.env['survey.question'].with_user(self.survey_manager).create({
cls.question_ft = cls.env['survey.question'].with_user(cls.survey_manager).create({
'title': 'Test Free Text',
'survey_id': self.survey.id,
'survey_id': cls.survey.id,
'sequence': 2,
'question_type': 'text_box',
})
self.question_num = self.env['survey.question'].with_user(self.survey_manager).create({
cls.question_num = cls.env['survey.question'].with_user(cls.survey_manager).create({
'title': 'Test NUmerical Box',
'survey_id': self.survey.id,
'survey_id': cls.survey.id,
'sequence': 3,
'question_type': 'numerical_box',
})
cls.question_scale = cls.env['survey.question'].with_user(cls.survey_manager).create({
'title': 'Test Scale',
'survey_id': cls.survey.id,
'sequence': 40,
'question_type': 'scale',
})
class TestSurveyResultsCommon(SurveyCase):
    """ Shared fixture for survey results tests: a single one-page survey with
    one question per main type, answered by two completed participations
    (``user_input_1`` and ``user_input_2``). """

    @classmethod
    def setUpClass(cls):
        super(TestSurveyResultsCommon, cls).setUpClass()
        cls.survey_manager = mail_new_test_user(
            cls.env, name='Gustave Doré', login='survey_manager', email='survey.manager@example.com',
            groups='survey.group_survey_manager,base.group_user'
        )

        # Create survey with questions.
        # NOTE: the `_add_*` helpers are instance methods; `cls` is passed
        # explicitly to stand in for `self` during class-level setup.
        cls.survey = cls.env['survey.survey'].create({
            'title': 'Test Survey Results',
            'questions_layout': 'one_page'
        })
        cls.question_char_box = cls._add_question(
            cls, None, 'What is your name', 'char_box', survey_id=cls.survey.id, sequence='1')
        cls.question_numerical_box = cls._add_question(
            cls, None, 'What is your age', 'numerical_box', survey_id=cls.survey.id, sequence='2')
        cls.question_sc = cls._add_question(
            cls, None, 'Are you a cat or a dog person', 'simple_choice', survey_id=cls.survey.id,
            sequence='3', labels=[{'value': 'Cat'},
                                  {'value': 'Dog'}])
        cls.question_mc = cls._add_question(
            cls, None, 'What do you like most in our tarte al djotte', 'multiple_choice', survey_id=cls.survey.id,
            sequence='4', labels=[{'value': 'The gras'},
                                  {'value': 'The bette'},
                                  {'value': 'The tout'},
                                  {'value': 'The regime is fucked up'}])
        cls.question_mx1 = cls._add_question(
            cls, None, 'When do you harvest those fruits', 'matrix', survey_id=cls.survey.id, sequence='5',
            labels=[{'value': 'Spring'}, {'value': 'Summer'}],
            labels_2=[{'value': 'Apples'},
                      {'value': 'Strawberries'}])
        cls.question_mx2 = cls._add_question(
            cls, None, 'How often should you water those plants', 'matrix', survey_id=cls.survey.id, sequence='6',
            labels=[{'value': 'Once a month'}, {'value': 'Once a week'}],
            labels_2=[{'value': 'Cactus'},
                      {'value': 'Ficus'}])
        cls.question_scale = cls._add_question(
            cls, None, 'How would you rate your experience on our website ?', 'scale', survey_id=cls.survey.id, sequence='7',
        )

        # Question answers ids, unpacked for readable assertions in tests
        [cls.cat_id, cls.dog_id] = cls.question_sc.suggested_answer_ids.ids
        [cls.gras_id, cls.bette_id, _, _] = cls.question_mc.suggested_answer_ids.ids
        [cls.apples_row_id, cls.strawberries_row_id] = cls.question_mx1.matrix_row_ids.ids
        [cls.spring_id, cls.summer_id] = cls.question_mx1.suggested_answer_ids.ids
        [cls.cactus_row_id, cls.ficus_row_id] = cls.question_mx2.matrix_row_ids.ids
        [cls.once_a_month_id, cls.once_a_week_id] = cls.question_mx2.suggested_answer_ids.ids

        # Populate survey with answers: first participant
        cls.user_input_1 = cls._add_answer(cls, cls.survey, cls.survey_manager.partner_id)
        cls.answer_lukas = cls._add_answer_line(cls, cls.question_char_box, cls.user_input_1, 'Lukas')
        cls.answer_24 = cls._add_answer_line(cls, cls.question_numerical_box, cls.user_input_1, 24)
        cls.answer_cat = cls._add_answer_line(cls, cls.question_sc, cls.user_input_1, cls.cat_id)
        cls._add_answer_line(cls, cls.question_mc, cls.user_input_1, cls.gras_id)
        cls._add_answer_line(cls, cls.question_mx1, cls.user_input_1, cls.summer_id, **{'answer_value_row': cls.apples_row_id})
        cls._add_answer_line(cls, cls.question_mx1, cls.user_input_1, cls.spring_id, **{'answer_value_row': cls.strawberries_row_id})
        cls._add_answer_line(cls, cls.question_mx2, cls.user_input_1, cls.once_a_month_id, **{'answer_value_row': cls.cactus_row_id})
        cls._add_answer_line(cls, cls.question_mx2, cls.user_input_1, cls.once_a_week_id, **{'answer_value_row': cls.ficus_row_id})
        cls._add_answer_line(cls, cls.question_scale, cls.user_input_1, '5')
        cls.user_input_1.state = 'done'

        # Second participant
        cls.user_input_2 = cls._add_answer(cls, cls.survey, cls.survey_manager.partner_id)
        cls.answer_pauline = cls._add_answer_line(cls, cls.question_char_box, cls.user_input_2, 'Pauline')
        cls._add_answer_line(cls, cls.question_numerical_box, cls.user_input_2, 24)
        cls.answer_dog = cls._add_answer_line(cls, cls.question_sc, cls.user_input_2, cls.dog_id)
        cls._add_answer_line(cls, cls.question_mc, cls.user_input_2, cls.gras_id)
        cls._add_answer_line(cls, cls.question_mc, cls.user_input_2, cls.bette_id)
        cls._add_answer_line(cls, cls.question_mx1, cls.user_input_2, cls.spring_id, **{'answer_value_row': cls.apples_row_id})
        cls._add_answer_line(cls, cls.question_mx1, cls.user_input_2, cls.spring_id, **{'answer_value_row': cls.strawberries_row_id})
        cls._add_answer_line(cls, cls.question_mx2, cls.user_input_2, cls.once_a_month_id, **{'answer_value_row': cls.cactus_row_id})
        cls._add_answer_line(cls, cls.question_mx2, cls.user_input_2, cls.once_a_month_id, **{'answer_value_row': cls.ficus_row_id})
        # Fix: this scale answer belongs to the second participant (it was
        # previously attached to user_input_1, which already has its own scale
        # answer '5' above, leaving user_input_2 without one).
        cls.scale_answer_line_2 = cls._add_answer_line(cls, cls.question_scale, cls.user_input_2, '7')
        cls.user_input_2.state = 'done'

View file

@ -144,9 +144,9 @@ class TestCertificationBadge(common.TestSurveyCommon):
with self.assertRaises(AccessError):
self.certification_badge.with_user(self.user_emp).write({'description': "I'm a dude who think that has every right on the Iron Throne"})
with self.assertRaises(AccessError):
self.certification_badge.with_user(self.user_portal).write({'description': "Guy, you just can't do that !"})
self.certification_badge.with_user(self.user_portal).write({'description': "Guy, you just can't do that!"})
with self.assertRaises(AccessError):
self.certification_badge.with_user(self.user_public).write({'description': "What did you expect ? Schwepps !"})
self.certification_badge.with_user(self.user_public).write({'description': "What did you expect ? Schwepps!"})
def test_badge_configuration_multi(self):
vals = {

View file

@ -3,14 +3,16 @@
from unittest.mock import patch
from odoo.addons.base.models.ir_mail_server import IrMailServer
from odoo import Command
from odoo.addons.base.models.ir_mail_server import IrMail_Server
from odoo.addons.mail.tests.common import MockEmail
from odoo.addons.survey.tests import common
from odoo.tests import tagged
from odoo.tests.common import HttpCase
@tagged('-at_install', 'post_install', 'functional')
class TestCertificationFlow(common.TestSurveyCommon, HttpCase):
@tagged('-at_install', 'post_install', 'functional', 'is_query_count')
class TestCertificationFlow(common.TestSurveyCommon, MockEmail, HttpCase):
def test_flow_certification(self):
# Step: survey user creates the certification
@ -78,6 +80,11 @@ class TestCertificationFlow(common.TestSurveyCommon, HttpCase):
{'value': 'a_future_and_yet_unknown_model', 'is_correct': True, 'answer_score': 1.0},
{'value': 'none', 'answer_score': -1.0}
])
q06 = self._add_question(
None, 'Are you sure of all your answers (not rated)', 'simple_choice',
sequence=6,
constr_mandatory=False, survey_id=certification.id,
labels=[{'value': 'Yes'}, {'value': 'No'}])
# Step: employee takes the certification
# --------------------------------------------------
@ -101,20 +108,46 @@ class TestCertificationFlow(common.TestSurveyCommon, HttpCase):
r = self._access_begin(certification, answer_token)
self.assertResponse(r, 200)
with patch.object(IrMailServer, 'connect'):
with self.mock_mail_gateway():
self._answer_question(q01, q01.suggested_answer_ids.ids[3], answer_token, csrf_token)
self._answer_question(q02, q02.suggested_answer_ids.ids[1], answer_token, csrf_token)
self._answer_question(q02, q02.suggested_answer_ids.ids[0], answer_token, csrf_token) # incorrect => no points
self._answer_question(q03, "", answer_token, csrf_token, button_submit='previous')
self._answer_question(q02, q02.suggested_answer_ids.ids[1], answer_token, csrf_token) # correct answer
self._answer_question(q03, "I think they're great!", answer_token, csrf_token)
self._answer_question(q04, q04.suggested_answer_ids.ids[0], answer_token, csrf_token, button_submit='previous')
self._answer_question(q03, "Just kidding, I don't like it...", answer_token, csrf_token)
self._answer_question(q04, q04.suggested_answer_ids.ids[0], answer_token, csrf_token)
self._answer_question(q05, [q05.suggested_answer_ids.ids[0], q05.suggested_answer_ids.ids[1], q05.suggested_answer_ids.ids[3]], answer_token, csrf_token)
self._answer_question(q04, q04.suggested_answer_ids.ids[0], answer_token, csrf_token,
submit_query_count=43, access_page_query_count=24)
q05_answers = q05.suggested_answer_ids.ids[0:2] + [q05.suggested_answer_ids.ids[3]]
self._answer_question(q05, q05_answers, answer_token, csrf_token,
submit_query_count=28, access_page_query_count=24)
self._answer_question(q06, q06.suggested_answer_ids.ids[0], answer_token, csrf_token,
submit_query_count=108, access_page_query_count=24)
user_inputs.invalidate_recordset()
# Check that certification is successfully passed
self.assertEqual(user_inputs.scoring_percentage, 87.5)
self.assertTrue(user_inputs.scoring_success)
# assert statistics
statistics = user_inputs._prepare_statistics()[user_inputs]
total_statistics = statistics['totals']
self.assertEqual(
sorted(
total_statistics,
key=lambda item: item['text']
),
sorted(
[
{'text': 'Correct', 'count': 2},
{'text': 'Partially', 'count': 1},
{'text': 'Incorrect', 'count': 0},
{'text': 'Unanswered', 'count': 0},
],
key=lambda item: item['text']
)
)
# Check that the certification is still successful even if scoring_success_min of certification is modified
certification.write({'scoring_success_min': 90})
self.assertTrue(user_inputs.scoring_success)
@ -123,12 +156,37 @@ class TestCertificationFlow(common.TestSurveyCommon, HttpCase):
self.assertNotIn("I think they're great!", user_inputs.mapped('user_input_line_ids.value_text_box'))
self.assertIn("Just kidding, I don't like it...", user_inputs.mapped('user_input_line_ids.value_text_box'))
certification_email = self.env['mail.mail'].sudo().search([], limit=1, order="create_date desc")
# Check certification email correctly sent and contains document
self.assertIn("User Certification for SO lines", certification_email.subject)
self.assertIn("employee@example.com", certification_email.email_to)
self.assertEqual(len(certification_email.attachment_ids), 1)
self.assertEqual(certification_email.attachment_ids[0].name, 'Certification Document.html')
self.assertMailMail(
self.user_emp.partner_id,
'outgoing',
fields_values={
'attachments_info': [
{'name': f'Certification - {certification.title}.html'},
],
'subject': f'Certification: {certification.title}',
},
)
# Check that the certification can be printed without access to the participant's company
with self.with_user('admin'):
new_company = self.env['res.company'].create({
'name': 'newB',
})
user_new_company = self.env['res.users'].create({
'name': 'No access right user',
'login': 'user_new_company',
'password': 'user_new_company',
'group_ids': [
Command.set(self.env.ref('base.group_user').ids),
Command.link(self.env.ref('survey.group_survey_user').id),
],
'company_id': new_company.id,
'company_ids': [new_company.id],
})
new_company.invalidate_model() # cache pollution
self.env['ir.actions.report'].with_user(user_new_company).with_company(new_company)\
._render_qweb_pdf('survey.certification_report_view', res_ids=user_inputs.ids)
def test_randomized_certification(self):
# Step: survey user creates the randomized certification
@ -188,7 +246,7 @@ class TestCertificationFlow(common.TestSurveyCommon, HttpCase):
r = self._access_begin(certification, answer_token)
self.assertResponse(r, 200)
with patch.object(IrMailServer, 'connect'):
with patch.object(IrMail_Server, '_connect__'):
question_ids = user_inputs.predefined_question_ids
self.assertEqual(len(question_ids), 1, 'Only one question should have been selected by the randomization')
# Whatever which question was selected, the correct answer is the first one
@ -196,12 +254,22 @@ class TestCertificationFlow(common.TestSurveyCommon, HttpCase):
statistics = user_inputs._prepare_statistics()[user_inputs]
total_statistics = statistics['totals']
self.assertEqual(total_statistics, [
{'text': 'Correct', 'count': 1},
{'text': 'Partially', 'count': 0},
{'text': 'Incorrect', 'count': 0},
{'text': 'Unanswered', 'count': 0},
], "With the configured randomization, there should be exactly 1 correctly answered question and none skipped.")
self.assertEqual(
sorted(
total_statistics,
key=lambda item: item['text']
),
sorted(
[
{'text': 'Correct', 'count': 1},
{'text': 'Partially', 'count': 0},
{'text': 'Incorrect', 'count': 0},
{'text': 'Unanswered', 'count': 0},
],
key=lambda item: item['text']
),
"With the configured randomization, there should be exactly 1 correctly answered question and none skipped."
)
section_statistics = statistics['by_section']
self.assertEqual(section_statistics, {

View file

@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import TransactionCase
class TestIrHttp(TransactionCase):
    """ Checks for the survey-frontend URL detection helper of ``ir.http``. """

    def test_is_survey_frontend(self):
        ir_http = self.env['ir.http']
        # URLs that must be detected as survey frontend, with or without a
        # language prefix ('hr' is unavoidable: it is a language code too).
        frontend_paths = (
            '/survey/test',
            '/fr_BE/survey/test',
            '/fr/survey/test',
            '/hr/survey/test',
        )
        for path in frontend_paths:
            self.assertTrue(ir_http._is_survey_frontend(path))
        # URLs that must NOT match: other routes, '/survey' not in first
        # position, case-sensitive prefixes, language-prefixed non-survey routes.
        other_paths = (
            '/hr/event/test',
            '/event',
            '/event/survey/test',
            '/eveNT/survey/test',
            '/fr_BE/event/test',
            '/fr/event/test',
        )
        for path in other_paths:
            self.assertFalse(ir_http._is_survey_frontend(path))

View file

@ -1,14 +1,57 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
from freezegun import freeze_time
from odoo import _, Command, fields
from odoo.addons.mail.tests.common import MailCase
from odoo.addons.survey.tests import common
from odoo.tests.common import users
class TestSurveyInternals(common.TestSurveyCommon):
class TestSurveyInternals(common.TestSurveyCommon, MailCase):
    @users('survey_manager')
    def test_allowed_triggering_question_ids(self):
        """ ``allowed_triggering_question_ids`` must only expose questions from
        the same survey: previous questions for saved records, any choice
        question of the survey for new (unsaved) records. """
        # Create 2 surveys, each with 3 questions, each with 2 suggested answers
        survey_1, survey_2 = self.env['survey.survey'].create([
            {'title': 'Test Survey 1'},
            {'title': 'Test Survey 2'}
        ])
        # batch-create 6 questions, alternating between the two surveys
        self.env['survey.question'].create([
            {
                'survey_id': survey_id,
                'title': f'Question {question_idx}',
                'question_type': 'simple_choice',
                'suggested_answer_ids': [
                    Command.create({
                        'value': f'Answer {answer_idx}',
                    }) for answer_idx in range(2)],
            }
            for question_idx in range(3)
            for survey_id in (survey_1 | survey_2).ids
        ])
        survey_1_q_1, survey_1_q_2, _ = survey_1.question_ids
        survey_2_q_1, survey_2_q_2, _ = survey_2.question_ids

        with self.subTest('Editing existing questions'):
            # Only previous questions from the same survey
            self.assertFalse(bool(survey_1_q_2.allowed_triggering_question_ids & survey_2_q_2.allowed_triggering_question_ids))
            self.assertEqual(survey_1_q_2.allowed_triggering_question_ids, survey_1_q_1)
            self.assertEqual(survey_2_q_2.allowed_triggering_question_ids, survey_2_q_1)

        # `new()` builds in-memory records only, nothing is written to the DB
        survey_1_new_question = self.env['survey.question'].new({'survey_id': survey_1})
        survey_2_new_question = self.env['survey.question'].new({'survey_id': survey_2})
        with self.subTest('New questions'):
            # New questions should be allowed to use any question with choices from the same survey
            self.assertFalse(
                bool(survey_1_new_question.allowed_triggering_question_ids & survey_2_new_question.allowed_triggering_question_ids)
            )
            self.assertEqual(survey_1_new_question.allowed_triggering_question_ids.ids, survey_1.question_ids.ids)
            self.assertEqual(survey_2_new_question.allowed_triggering_question_ids.ids, survey_2.question_ids.ids)
def test_answer_attempts_count(self):
""" As 'attempts_number' and 'attempts_count' are computed using raw SQL queries, let us
@ -44,6 +87,7 @@ class TestSurveyInternals(common.TestSurveyCommon):
self.assertEqual(fourth_attempt['attempts_count'], 4)
@freeze_time("2020-02-15 18:00")
@users('survey_manager')
def test_answer_display_name(self):
""" The "display_name" field in a survey.user_input.line is a computed field that will
display the answer label for any type of question.
@ -67,7 +111,7 @@ class TestSurveyInternals(common.TestSurveyCommon):
self.assertEqual(question_answer.display_name, '2020-02-15')
elif question.question_type == 'datetime':
question_answer = self._add_answer_line(question, user_input, fields.Datetime.now())
self.assertEqual(question_answer.display_name, '2020-02-15 18:00:00')
self.assertEqual(question_answer.display_name, '2020-02-15 19:00:00')
elif question.question_type == 'simple_choice':
question_answer = self._add_answer_line(question, user_input, question.suggested_answer_ids[0].id)
self.assertEqual(question_answer.display_name, 'SChoice0')
@ -83,6 +127,9 @@ class TestSurveyInternals(common.TestSurveyCommon):
question_answer_2 = self._add_answer_line(question, user_input,
question.suggested_answer_ids[0].id, **{'answer_value_row': question.matrix_row_ids[1].id})
self.assertEqual(question_answer_2.display_name, 'Column0: Row1')
elif question.question_type == 'scale':
question_answer = self._add_answer_line(question, user_input, '3')
self.assertEqual(question_answer.display_name, '3')
@users('survey_manager')
def test_answer_validation_mandatory(self):
@ -100,7 +147,7 @@ class TestSurveyInternals(common.TestSurveyCommon):
validation_min_date='2015-03-20', validation_max_date='2015-03-25', validation_error_msg='ValidationError')
self.assertEqual(
question.validate_question('Is Alfred an answer ?'),
question.validate_question('Is Alfred an answer?'),
{question.id: _('This is not a date')}
)
@ -126,7 +173,7 @@ class TestSurveyInternals(common.TestSurveyCommon):
validation_min_float_value=2.2, validation_max_float_value=3.3, validation_error_msg='ValidationError')
self.assertEqual(
question.validate_question('Is Alfred an answer ?'),
question.validate_question('Is Alfred an answer?'),
{question.id: _('This is not a number')}
)
@ -180,6 +227,86 @@ class TestSurveyInternals(common.TestSurveyCommon):
{}
)
    @users('survey_manager')
    def test_simple_choice_validation_multiple_answers(self):
        """
        Check that a 'simple_choice' question fails validation if more than one
        valid answer is provided.
        """
        # comment_count_as_answer=True makes a comment a potential second answer
        question = self._add_question(
            self.page_0, 'Simple Choice Constraint Test', 'simple_choice',
            constr_mandatory=True,
            comments_allowed=True,
            comment_count_as_answer=True,
            labels=[{'value': 'Choice X'}, {'value': 'Choice Y'}]
        )
        answer_choice_x_id = question.suggested_answer_ids[0].id
        answer_choice_y_id = question.suggested_answer_ids[1].id

        # Each scenario: (description, answer payload, comment, expect "too many answers" error)
        scenarios = [
            (
                'Two selected choices should not be allowed',
                [answer_choice_x_id, answer_choice_y_id],
                None,
                True,
            ),
            (
                'One choice and one comment that counts as an answer',
                answer_choice_x_id,
                'This is my comment, which is also an answer.',
                True,
            ),
            (
                'A single valid answer should pass validation',
                answer_choice_x_id,
                None,
                False,
            ),
            (
                'A single valid comment should pass validation',
                '',
                'This is my comment, which is also an answer.',
                False,
            ),
        ]
        for case_description, answers, comment, is_multiple_answers in scenarios:
            with self.subTest(answers=answers, comment=comment):
                # validate_question returns {question_id: error_message} on
                # failure, or an empty dict when the submission is valid
                self.assertEqual(
                    question.validate_question(answers, comment),
                    {question.id: 'For this question, you can only select one answer.'} if is_multiple_answers else {},
                    case_description,
                )
    @users('survey_manager')
    def test_answer_validation_comment(self):
        """ Check that a comment validates a mandatory question based on 'comment_count_as_answer'. """
        # Scenario 1: A comment counts as a valid answer.
        question_ok = self._add_question(
            self.page_0, 'Q_OK', 'multiple_choice',
            constr_mandatory=True, validation_error_msg='ValidationError',
            comments_allowed=True,
            comment_count_as_answer=True,
            labels=[{'value': 'Choice A'}, {'value': 'Choice B'}])
        # empty dict means the submission is valid
        self.assertEqual(
            question_ok.validate_question(answer='', comment='This comment is a valid answer.'),
            {}
        )

        # Scenario 2: A comment does NOT count as a valid answer.
        question_fail = self._add_question(
            self.page_0, 'Q_FAIL', 'multiple_choice',
            constr_mandatory=True, validation_error_msg='ValidationError',
            comments_allowed=True,
            comment_count_as_answer=False,
            labels=[{'value': 'Choice A'}, {'value': 'Choice B'}])
        # NOTE(review): the question is configured with
        # validation_error_msg='ValidationError' but the assertion expects
        # 'TestError' — confirm which message validate_question() actually
        # returns for a missing mandatory answer (constr_error_msg vs
        # validation_error_msg); this looks inconsistent.
        self.assertEqual(
            question_fail.validate_question(answer='', comment='This comment is not enough.'),
            {question_fail.id: 'TestError'}
        )
def test_partial_scores_simple_choice(self):
"""" Check that if partial scores are given for partially correct answers, in the case of a multiple
choice question with single choice, choosing the answer with max score gives 100% of points. """
@ -220,6 +347,83 @@ class TestSurveyInternals(common.TestSurveyCommon):
self.assertEqual(user_input.scoring_percentage, 100)
self.assertTrue(user_input.scoring_success)
def test_session_code_generation(self):
surveys = self.env['survey.survey'].create([{
'title': f'Survey {i}'
} for i in range(30)])
survey_codes = surveys.mapped('session_code')
self.assertEqual(len(survey_codes), 30)
for code in survey_codes:
self.assertTrue(bool(code))
self.assertEqual(
len(surveys.filtered(lambda survey: survey.session_code == code)),
1,
f"Each code should be unique, found multiple occurrences of: {code}"
)
    def test_simple_choice_question_answer_result(self):
        """ Check how a simple choice answer is classified in the statistics
        ('Correct' / 'Partially' / 'Incorrect') depending on its configured
        score and `is_correct` flag. """
        test_survey = self.env['survey.survey'].create({
            'title': 'Test This Survey',
            'scoring_type': 'scoring_with_answers',
            'scoring_success_min': 80.0,
        })
        # four answers covering the (score, is_correct) combinations
        [a_01, a_02, a_03, a_04] = self.env['survey.question.answer'].create([{
            'value': 'In Europe',
            'answer_score': 0.0,
            'is_correct': False
        }, {
            'value': 'In Asia',
            'answer_score': 5.0,
            'is_correct': True
        }, {
            'value': 'In South Asia',
            'answer_score': 10.0,
            'is_correct': True
        }, {
            'value': 'On Globe',
            'answer_score': 5.0,
            'is_correct': False
        }])
        q_01 = self.env['survey.question'].create({
            'survey_id': test_survey.id,
            'title': 'Where is india?',
            'sequence': 1,
            'question_type': 'simple_choice',
            'suggested_answer_ids': [(6, 0, (a_01 | a_02 | a_03 | a_04).ids)]
        })

        user_input = self.env['survey.user_input'].create({'survey_id': test_survey.id})
        user_input_line = self.env['survey.user_input.line'].create({
            'user_input_id': user_input.id,
            'question_id': q_01.id,
            'answer_type': 'suggestion',
            'suggested_answer_id': a_01.id
        })

        def assert_answer_status(expected_answer_status, questions_statistics):
            """Assert counts for 'Correct', 'Partially', 'Incorrect', 'Unanswered' are 0, and 1 for our expected answer status"""
            for status, count in [(total['text'], total['count']) for total in questions_statistics['totals']]:
                self.assertEqual(count, 1 if status == expected_answer_status else 0)

        # this answer is incorrect with no score: should be considered as incorrect
        statistics = user_input._prepare_statistics()[user_input]
        assert_answer_status('Incorrect', statistics)

        # this answer is correct with a positive score (even if not the maximum): should be considered as correct
        user_input_line.suggested_answer_id = a_02.id
        statistics = user_input._prepare_statistics()[user_input]
        assert_answer_status('Correct', statistics)

        # this answer is correct with the best score: should be considered as correct
        user_input_line.suggested_answer_id = a_03.id
        statistics = user_input._prepare_statistics()[user_input]
        assert_answer_status('Correct', statistics)

        # this answer is incorrect but has a score: should be considered as "partially"
        user_input_line.suggested_answer_id = a_04.id
        statistics = user_input._prepare_statistics()[user_input]
        assert_answer_status('Partially', statistics)
@users('survey_manager')
def test_skipped_values(self):
""" Create one question per type of questions.
@ -231,11 +435,35 @@ class TestSurveyInternals(common.TestSurveyCommon):
for question in questions:
answer = '' if question.question_type in ['char_box', 'text_box'] else None
survey_user.save_lines(question, answer)
survey_user._save_lines(question, answer)
for question in questions:
self._assert_skipped_question(question, survey_user)
@users('survey_manager')
def test_multiple_choice_comment_not_skipped(self):
    """ Test that a multiple choice question with only a comment is not marked as skipped. """
    survey_user = self.survey._create_answer(user=self.survey_user)
    # comment_count_as_answer=True: a lone comment must count as a real answer.
    question = self._add_question(
        self.page_0, 'MCQ with Comment', 'multiple_choice',
        comments_allowed=True,
        comment_count_as_answer=True,
        labels=[{'value': 'Choice A'}, {'value': 'Choice B'}]
    )

    # Save an answer with no selected choice but with a comment.
    survey_user._save_lines(question, answer=[], comment='This is only a comment')

    answer_line = self.env['survey.user_input.line'].search([
        ('user_input_id', '=', survey_user.id),
        ('question_id', '=', question.id)
    ])
    # Exactly one non-skipped line must exist for the question.
    self.assertEqual(len(answer_line), 1)
    self.assertFalse(answer_line.skipped)
    # The comment is stored as a char_box value on the answer line.
    self.assertEqual(answer_line.answer_type, 'char_box')
    self.assertEqual(answer_line.value_char_box, 'This is only a comment')
@users('survey_manager')
def test_copy_conditional_question_settings(self):
""" Create a survey with conditional layout, clone it and verify that the cloned survey has the same conditional
@ -246,22 +474,21 @@ class TestSurveyInternals(common.TestSurveyCommon):
return survey.question_ids.filtered(lambda q: q.title == title)[0]
# Create the survey questions (! texts of the questions must be unique as they are used to query them)
q_is_vegetarian_text = 'Are you vegetarian ?'
q_is_vegetarian_text = 'Are you vegetarian?'
q_is_vegetarian = self._add_question(
self.page_0, q_is_vegetarian_text, 'multiple_choice', survey_id=self.survey.id,
sequence=100, labels=[{'value': 'Yes'}, {'value': 'No'}])
sequence=100, labels=[{'value': 'Yes'}, {'value': 'No'}, {'value': 'Sometimes'}])
q_food_vegetarian_text = 'Choose your green meal'
self._add_question(self.page_0, q_food_vegetarian_text, 'multiple_choice',
is_conditional=True, sequence=101,
triggering_question_id=q_is_vegetarian.id,
triggering_answer_id=q_is_vegetarian.suggested_answer_ids[0].id,
sequence=101,
triggering_answer_ids=[q_is_vegetarian.suggested_answer_ids[0].id,
q_is_vegetarian.suggested_answer_ids[2].id],
survey_id=self.survey.id,
labels=[{'value': 'Vegetarian pizza'}, {'value': 'Vegetarian burger'}])
q_food_not_vegetarian_text = 'Choose your meal'
q_food_not_vegetarian_text = 'Choose your meal in case we serve meet/fish'
self._add_question(self.page_0, q_food_not_vegetarian_text, 'multiple_choice',
is_conditional=True, sequence=102,
triggering_question_id=q_is_vegetarian.id,
triggering_answer_id=q_is_vegetarian.suggested_answer_ids[1].id,
sequence=102,
triggering_answer_ids=q_is_vegetarian.suggested_answer_ids[1].ids,
survey_id=self.survey.id,
labels=[{'value': 'Steak with french fries'}, {'value': 'Fish'}])
@ -273,29 +500,25 @@ class TestSurveyInternals(common.TestSurveyCommon):
q_food_vegetarian_cloned = get_question_by_title(survey_clone, q_food_vegetarian_text)
q_food_not_vegetarian_cloned = get_question_by_title(survey_clone, q_food_not_vegetarian_text)
self.assertFalse(q_is_vegetarian_cloned.is_conditional)
self.assertFalse(bool(q_is_vegetarian_cloned.triggering_answer_ids))
# Vegetarian choice
self.assertTrue(q_food_vegetarian_cloned)
self.assertTrue(bool(q_food_vegetarian_cloned))
# Correct conditional layout
self.assertEqual(q_food_vegetarian_cloned.triggering_question_id.id, q_is_vegetarian_cloned.id)
self.assertEqual(q_food_vegetarian_cloned.triggering_answer_id.id,
q_is_vegetarian_cloned.suggested_answer_ids[0].id)
self.assertEqual(q_food_vegetarian_cloned.triggering_answer_ids.ids,
[q_is_vegetarian_cloned.suggested_answer_ids[0].id, q_is_vegetarian_cloned.suggested_answer_ids[2].id])
# Doesn't reference the original survey
self.assertNotEqual(q_food_vegetarian_cloned.triggering_question_id.id, q_is_vegetarian.id)
self.assertNotEqual(q_food_vegetarian_cloned.triggering_answer_id.id,
q_is_vegetarian.suggested_answer_ids[0].id)
self.assertNotEqual(q_food_vegetarian_cloned.triggering_answer_ids.ids,
[q_is_vegetarian.suggested_answer_ids[0].id, q_is_vegetarian.suggested_answer_ids[2].id])
# Not vegetarian choice
self.assertTrue(q_food_not_vegetarian_cloned.is_conditional)
self.assertTrue(bool(q_food_not_vegetarian_cloned.triggering_answer_ids))
# Correct conditional layout
self.assertEqual(q_food_not_vegetarian_cloned.triggering_question_id.id, q_is_vegetarian_cloned.id)
self.assertEqual(q_food_not_vegetarian_cloned.triggering_answer_id.id,
q_is_vegetarian_cloned.suggested_answer_ids[1].id)
self.assertEqual(q_food_not_vegetarian_cloned.triggering_answer_ids.ids,
q_is_vegetarian_cloned.suggested_answer_ids[1].ids)
# Doesn't reference the original survey
self.assertNotEqual(q_food_not_vegetarian_cloned.triggering_question_id.id, q_is_vegetarian.id)
self.assertNotEqual(q_food_not_vegetarian_cloned.triggering_answer_id.id,
q_is_vegetarian.suggested_answer_ids[1].id)
self.assertNotEqual(q_food_not_vegetarian_cloned.triggering_answer_ids.ids,
q_is_vegetarian.suggested_answer_ids[1].ids)
@users('survey_manager')
def test_copy_conditional_question_with_sequence_changed(self):
@ -318,11 +541,7 @@ class TestSurveyInternals(common.TestSurveyCommon):
q_2.write({'sequence': 100})
# Set a conditional question on the first question
q_1.write({
'is_conditional': True,
'triggering_question_id': q_2.id,
'triggering_answer_id': q_2.suggested_answer_ids[0].id,
})
q_1.write({'triggering_answer_ids': [Command.set([q_2.suggested_answer_ids[0].id])]})
(q_1 | q_2).invalidate_recordset()
@ -334,8 +553,159 @@ class TestSurveyInternals(common.TestSurveyCommon):
self.assertEqual(get_question_by_title(cloned_survey, 'Q2').sequence, q_2.sequence)
# Check that the conditional question is correctly copied to the right question
self.assertEqual(get_question_by_title(cloned_survey, 'Q1').triggering_question_id.title, q_1.triggering_question_id.title)
self.assertFalse(get_question_by_title(cloned_survey, 'Q2').triggering_question_id)
self.assertEqual(
get_question_by_title(cloned_survey, 'Q1').triggering_answer_ids[0].value, q_1.triggering_answer_ids[0].value
)
self.assertFalse(bool(get_question_by_title(cloned_survey, 'Q2').triggering_answer_ids))
@users('survey_manager')
def test_matrix_rows_display_name(self):
    """A matrix row's display name must be the row value itself, untouched
    (no prefixing with the question title, no shortening)."""
    # (question title, row value) pairs — the display name must equal the value verbatim.
    row_cases = [
        (
            'Question 1',
            'Row A is short, so what?',
        ), (
            'Question 2',
            'Row B is a very long question, but it is shown by itself so there shouldn\'t be any change',
        ),
    ]
    Question = self.env['survey.question']
    for title, row_value in row_cases:
        record = Question.create({
            'title': title,
            'matrix_row_ids': [Command.create({'value': row_value})],
        })
        with self.subTest(question=title, row=row_value):
            self.assertEqual(record.matrix_row_ids[0].display_name, row_value)
@users('survey_manager')
def test_suggested_answer_display_name(self):
    """Check that answers' display name is not too long and allows to identify the question & answer.
    When a matrix answer though, simply show the value as the question and row should be made
    clear via the survey.user.input.line context."""
    # A case's shape is: (question title, answer value, expected display name, additional create values)
    cases = [
        (
            'Question 1',
            'Answer A is short',
            'Question 1 : Answer A is short',
            {}
        ), (
            # Long answer: the answer part gets ellipsized.
            'Question 2',
            'Answer B is a very long answer, so it should itself be shortened or we would go too far',
            'Question 2 : Answer B is a very long answer, so it should itself be shortened or we...',
            {}
        ), (
            'Question 3 is a very long question, so what can we do?',
            'Answer A is short',
            'Question 3 is a very long question, so what can we do? : Answer A is short',
            {}
        ), (
            # Long question with a medium answer: the question part gets ellipsized.
            'Question 4 is a very long question, so what can we do?',
            'Answer B is a bit too long for Q4 now',
            'Question 4 is a very long question, so what can... : Answer B is a bit too long for Q4 now',
            {}
        ), (
            # Both parts too long: both get ellipsized.
            'Question 5 is a very long question, so what can we do?',
            'Answer C is so long that both the question and the answer will be shortened',
            'Question 5 is a very long... : Answer C is so long that both the question and the...',
            {}
        ), (
            # Matrix answers: display name is the raw value, never prefixed nor shortened.
            'Question 6',
            'Answer A is short, so what?',
            'Answer A is short, so what?',
            {'question_type': 'matrix'},
        ), (
            'Question 7',
            'Answer B is a very long answer, but it is shown by itself so there shouldn\'t be any change',
            'Answer B is a very long answer, but it is shown by itself so there shouldn\'t be any change',
            {'question_type': 'matrix'},
        ),
    ]
    for question_title, answer_value, exp_display_name, other_values in cases:
        question = self.env['survey.question'].create({
            'title': question_title,
            'suggested_answer_ids': [Command.create({'value': answer_value})],
            **other_values
        })
        with self.subTest(question=question_title, answer=answer_value):
            self.assertEqual(question.suggested_answer_ids[0].display_name, exp_display_name)
@users('survey_manager')
def test_unlink_triggers(self):
    """Deleting trigger questions/answers must clean up dependent conditional questions:
    a question stays conditional while at least one of its trigger answers survives,
    and becomes always visible once all its triggers are gone."""
    # Create the survey questions
    q_is_vegetarian_text = 'Are you vegetarian?'
    q_is_vegetarian = self._add_question(
        self.page_0, q_is_vegetarian_text, 'simple_choice', survey_id=self.survey.id, sequence=100,
        labels=[{'value': 'Yes'}, {'value': 'No'}, {'value': 'It depends'}], constr_mandatory=True,
    )
    q_is_kinda_vegetarian_text = 'Would you prefer a veggie meal if possible?'
    q_is_kinda_vegetarian = self._add_question(
        self.page_0, q_is_kinda_vegetarian_text, 'simple_choice', survey_id=self.survey.id, sequence=101,
        labels=[{'value': 'Yes'}, {'value': 'No'}], constr_mandatory=True, triggering_answer_ids=[
            # NOTE(review): index 1 is 'No' ('It depends' is index 2) — original comment said
            # 'It depends'; confirm which trigger answer was intended.
            Command.link(q_is_vegetarian.suggested_answer_ids[1].id),
        ],
    )
    q_food_vegetarian_text = 'Choose your green meal'
    veggie_question = self._add_question(
        self.page_0, q_food_vegetarian_text, 'simple_choice', survey_id=self.survey.id, sequence=102,
        labels=[{'value': 'Vegetarian pizza'}, {'value': 'Vegetarian burger'}], constr_mandatory=True,
        triggering_answer_ids=[
            Command.link(q_is_vegetarian.suggested_answer_ids[0].id),  # Veggie
            Command.link(q_is_kinda_vegetarian.suggested_answer_ids[0].id),  # Would prefer veggie
        ])
    q_food_not_vegetarian_text = 'Choose your meal'
    not_veggie_question = self._add_question(
        self.page_0, q_food_not_vegetarian_text, 'simple_choice', survey_id=self.survey.id, sequence=103,
        labels=[{'value': 'Steak with french fries'}, {'value': 'Fish'}], constr_mandatory=True,
        triggering_answer_ids=[
            Command.link(q_is_vegetarian.suggested_answer_ids[1].id),  # Not a veggie
            Command.link(q_is_kinda_vegetarian.suggested_answer_ids[1].id),  # Would not prefer veggie
        ],
    )

    # Removing the whole "kinda vegetarian" question drops its answers as triggers.
    q_is_kinda_vegetarian.unlink()
    # Deleting one trigger but maintaining another keeps conditional behavior
    self.assertTrue(bool(veggie_question.triggering_answer_ids))
    q_is_vegetarian.suggested_answer_ids[0].unlink()
    # Deleting answer Yes makes the following question always visible
    self.assertFalse(bool(veggie_question.triggering_answer_ids))
    # But the other is still conditional
    # (after unlinking 'Yes', index 0 of the remaining answers is the former index 1)
    self.assertEqual(not_veggie_question.triggering_answer_ids[0].id, q_is_vegetarian.suggested_answer_ids[0].id)
    q_is_vegetarian.unlink()
    # Now it will also be always visible
    self.assertFalse(bool(not_veggie_question.triggering_answer_ids))
def test_get_correct_answers(self):
    """_get_correct_answers must map each scored question id to its expected value(s):
    a number, a formatted date(time) string, or the ids of the correct suggested answers."""
    questions = self._create_one_question_per_type_with_scoring()
    by_type = {question.question_type: question for question in questions}

    numerical = by_type['numerical_box']
    date_question = by_type['date']
    datetime_question = by_type['datetime']
    simple = by_type['simple_choice']
    multiple = by_type['multiple_choice']

    expected = {
        numerical.id: 5,
        date_question.id: '10/16/2023',
        datetime_question.id: '11/17/2023 08:00:00 AM',
        simple.id: simple.suggested_answer_ids.filtered_domain(
            [('value', '=', 'SChoice0')]).ids,
        multiple.id: multiple.suggested_answer_ids.filtered_domain(
            [('value', 'in', ['MChoice0', 'MChoice1'])]).ids,
    }
    self.assertEqual(questions._get_correct_answers(), expected)
def test_get_pages_and_questions_to_show(self):
"""
@ -345,20 +715,15 @@ class TestSurveyInternals(common.TestSurveyCommon):
Structure of the test survey:
sequence | type | trigger | validity
sequence | type | trigger | validity
----------------------------------------------------------------------
1 | page, no description | / | X
2 | text_box | trigger is 6 | X
3 | numerical_box | trigger is 2 | X
4 | simple_choice | / | V
5 | page, description | / | V
6 | multiple_choice | / | V
7 | multiple_choice, no answers | / | V
8 | text_box | trigger is 6 | V
9 | matrix | trigger is 5 | X
10 | simple_choice | trigger is 7 | X
11 | simple_choice, no answers | trigger is 8 | X
12 | text_box | trigger is 11 | X
1 | page, no description | / | X
2 | simple_choice | trigger is 5 | X
3 | simple_choice | trigger is 2 | X
4 | page, description | / | V
5 | multiple_choice | / | V
6 | text_box | triggers are 5+7 | V
7 | multiple_choice | | V
"""
my_survey = self.env['survey.survey'].create({
@ -369,17 +734,12 @@ class TestSurveyInternals(common.TestSurveyCommon):
})
[
page_without_description,
text_box_1,
numerical_box,
_simple_choice_1,
page_with_description,
multiple_choice_1,
multiple_choice_2,
text_box_2,
matrix,
simple_choice_1,
simple_choice_2,
simple_choice_3,
text_box_3,
_page_with_description,
multiple_choice_1,
text_box_2,
multiple_choice_2,
] = self.env['survey.question'].create([{
'title': 'no desc',
'survey_id': my_survey.id,
@ -388,86 +748,51 @@ class TestSurveyInternals(common.TestSurveyCommon):
'is_page': True,
'description': False,
}, {
'title': 'text_box with invalid trigger',
'title': 'simple choice with invalid trigger',
'survey_id': my_survey.id,
'sequence': 2,
'is_page': False,
'question_type': 'simple_choice',
'suggested_answer_ids': [(0, 0, {'value': 'a'})],
}, {
'title': 'numerical box with trigger that is invalid',
'title': 'simple_choice with chained invalid trigger',
'survey_id': my_survey.id,
'sequence': 3,
'is_page': False,
'question_type': 'numerical_box',
}, {
'title': 'valid simple_choice',
'survey_id': my_survey.id,
'sequence': 4,
'is_page': False,
'question_type': 'simple_choice',
'suggested_answer_ids': [(0, 0, {'value': 'a'})],
}, {
'title': 'with desc',
'survey_id': my_survey.id,
'sequence': 5,
'sequence': 4,
'is_page': True,
'question_type': False,
'description': 'This page has a description',
}, {
'title': 'multiple choice not conditional',
'survey_id': my_survey.id,
'sequence': 6,
'sequence': 5,
'is_page': False,
'question_type': 'multiple_choice',
'suggested_answer_ids': [(0, 0, {'value': 'a'})]
}, {
'title': 'multiple_choice with no answers',
'title': 'text_box with valid trigger',
'survey_id': my_survey.id,
'sequence': 6,
'is_page': False,
'question_type': 'text_box',
}, {
'title': 'valid multiple_choice',
'survey_id': my_survey.id,
'sequence': 7,
'is_page': False,
'question_type': 'multiple_choice',
}, {
'title': 'text_box with valid trigger',
'survey_id': my_survey.id,
'sequence': 8,
'is_page': False,
'question_type': 'text_box',
}, {
'title': 'matrix with invalid trigger (page)',
'survey_id': my_survey.id,
'sequence': 9,
'is_page': False,
'question_type': 'matrix',
}, {
'title': 'simple choice w/ invalid trigger (no suggested_answer_ids)',
'survey_id': my_survey.id,
'sequence': 10,
'is_page': False,
'question_type': 'simple_choice',
}, {
'title': 'text_box w/ invalid trigger (not a mcq)',
'survey_id': my_survey.id,
'sequence': 11,
'is_page': False,
'question_type': 'simple_choice',
'suggested_answer_ids': False,
}, {
'title': 'text_box w/ invalid trigger (suggested_answer_ids is False)',
'survey_id': my_survey.id,
'sequence': 12,
'is_page': False,
'question_type': 'text_box',
'suggested_answer_ids': [(0, 0, {'value': 'a'})]
}])
text_box_1.write({'is_conditional': True, 'triggering_question_id': multiple_choice_1.id})
numerical_box.write({'is_conditional': True, 'triggering_question_id': text_box_1.id})
text_box_2.write({'is_conditional': True, 'triggering_question_id': multiple_choice_1.id})
matrix.write({'is_conditional': True, 'triggering_question_id': page_with_description.id})
simple_choice_2.write({'is_conditional': True, 'triggering_question_id': multiple_choice_2.id})
simple_choice_3.write({'is_conditional': True, 'triggering_question_id': text_box_2.id})
text_box_3.write({'is_conditional': True, 'triggering_question_id': simple_choice_3.id})
invalid_records = page_without_description + text_box_1 + numerical_box \
+ matrix + simple_choice_2 + simple_choice_3 + text_box_3
simple_choice_1.write({'triggering_answer_ids': multiple_choice_1.suggested_answer_ids})
simple_choice_2.write({'triggering_answer_ids': multiple_choice_1.suggested_answer_ids})
text_box_2.write({'triggering_answer_ids': (multiple_choice_1 | multiple_choice_2).suggested_answer_ids})
invalid_records = page_without_description + simple_choice_1 + simple_choice_2
question_and_page_ids = my_survey.question_and_page_ids
returned_questions_and_pages = my_survey._get_pages_and_questions_to_show()
@ -475,7 +800,7 @@ class TestSurveyInternals(common.TestSurveyCommon):
def test_survey_session_leaderboard(self):
"""Check leaderboard rendering with small (max) scores values."""
start_time = fields.datetime(2023, 7, 7, 12, 0, 0)
start_time = datetime.datetime(2023, 7, 7, 12, 0, 0)
test_survey = self.env['survey.survey'].create({
'title': 'Test This Survey',
'scoring_type': 'scoring_with_answers',
@ -507,3 +832,177 @@ class TestSurveyInternals(common.TestSurveyCommon):
'animate': True,
'leaderboard': test_survey._prepare_leaderboard_values()
})
def test_notify_subscribers(self):
    """Check that messages are posted only if there are participation followers"""
    survey_2 = self.survey.copy()
    survey_participation_subtype = self.env.ref('survey.mt_survey_survey_user_input_completed')
    user_input_participation_subtype = self.env.ref('survey.mt_survey_user_input_completed')
    # Make survey_user (group_survey_user) follow participation to survey (they follow), not survey 2 (no followers)
    self.survey.message_subscribe(partner_ids=self.survey_user.partner_id.ids, subtype_ids=survey_participation_subtype.ids)
    # Complete a participation for both surveys, only one should trigger a notification for followers
    user_inputs = self.env['survey.user_input'].create([{'survey_id': survey.id} for survey in (self.survey, survey_2)])
    with self.mock_mail_app():
        user_inputs._mark_done()

    # Only the followed survey's completed participation produced a message,
    # posted on the user_input record and notifying the follower partner.
    self.assertEqual(len(self._new_msgs), 1)
    self.assertMessageFields(
        self._new_msgs,
        {
            'model': 'survey.user_input',
            'subtype_id': user_input_participation_subtype,
            'res_id': user_inputs[0].id,
            'notified_partner_ids': self.survey_user.partner_id
        },
    )
def test_survey_session_speed_reward_config_propagation(self):
    """Check the speed rating time limit propagation to non time-customized questions."""
    test_survey = self.env['survey.survey'].create({
        'title': 'Test This Survey',
        'scoring_type': 'scoring_with_answers',
        'question_and_page_ids': [
            Command.create({
                'is_time_customized': True,
                'is_time_limited': True,
                'time_limit': 30,
                'title': 'Question A',
            }), Command.create({
                'is_time_customized': True,
                'is_time_limited': True,
                'time_limit': 40,
                'title': 'Question B',
            }), Command.create({
                'time_limit': 11,  # left-over somehow
                'title': 'Question C',
            }),
        ],
    })
    self.assertFalse(test_survey.session_speed_rating)

    # Enabling speed rating propagates the survey limit; Question A's custom value
    # now matches the survey one so it is no longer flagged as customized.
    test_survey.write({'session_speed_rating': True, 'session_speed_rating_time_limit': 30})
    self.assertEqual(test_survey.session_speed_rating_time_limit, 30)
    self.assertSetEqual({*test_survey.question_ids.mapped('is_time_limited')}, {True})
    self.assertListEqual(test_survey.question_ids.mapped('time_limit'), [30, 40, 30])
    self.assertListEqual(test_survey.question_ids.mapped('is_time_customized'), [False, True, False])

    # Raising the survey limit to Question B's value re-aligns every question.
    test_survey.session_speed_rating_time_limit = 40
    self.assertSetEqual({*test_survey.question_ids.mapped('time_limit')}, {40})
    self.assertSetEqual({*test_survey.question_ids.mapped('is_time_customized')}, {False})

    # Opting the first two questions out of the time limit marks them customized...
    test_survey.question_ids[:2].write({
        "is_time_limited": False,
        'is_time_customized': True,  # As would the client do
    })
    self.assertListEqual(test_survey.question_ids.mapped('is_time_limited'), [False, False, True])
    self.assertListEqual(test_survey.question_ids.mapped('is_time_customized'), [True, True, False])

    # ...so further survey-level changes only reach the remaining non-customized question.
    test_survey.session_speed_rating_time_limit = 20
    self.assertListEqual(test_survey.question_ids.mapped('is_time_limited'), [False, False, True])
    self.assertListEqual(test_survey.question_ids.mapped('is_time_customized'), [True, True, False])
    self.assertEqual(test_survey.question_ids[2].time_limit, 20)

    # Disabling speed rating resets time limitation and customization on all questions.
    test_survey.session_speed_rating = False
    self.assertSetEqual({*test_survey.question_ids.mapped('is_time_limited')}, {False})
    self.assertSetEqual({*test_survey.question_ids.mapped('is_time_customized')}, {False})

    # test update in batch
    test_survey.write({'session_speed_rating': True, 'session_speed_rating_time_limit': 30})
    self.assertSetEqual({*test_survey.question_ids.mapped('is_time_limited')}, {True})
    self.assertSetEqual({*test_survey.question_ids.mapped('time_limit')}, {30})
    self.assertSetEqual({*test_survey.question_ids.mapped('is_time_customized')}, {False})
def test_survey_session_speed_reward_default_applied(self):
    """Check that new questions added to a survey with speed reward will apply defaults.

    Questions created matching the survey's time configuration are not flagged as
    time-customized; questions deviating from it (or created without time values) are.
    """
    test_survey = self.env['survey.survey'].create({
        'title': 'Test This Survey',
        'scoring_type': 'scoring_with_answers',
        'session_speed_rating': True,
        'session_speed_rating_time_limit': 60,
    })
    question_1, question_2, question_3, question_4 = self.env['survey.question'].create([{
        'is_time_limited': True,  # from client, unedited time limits (from default_get)
        'question_type': 'numerical_box',
        'survey_id': test_survey.id,
        'time_limit': 60,
        'title': 'Question 1',
    }, {
        'survey_id': test_survey.id,  # simple values (via rpc for example), will be updated to is_time_customized
        'question_type': 'numerical_box',
        'title': 'Question 2',
    }, {
        'is_time_customized': True,
        'is_time_limited': False,
        'question_type': 'numerical_box',
        'survey_id': test_survey.id,
        'title': 'Question 3',
    }, {
        'is_time_customized': True,  # override in client
        'is_time_limited': True,
        'question_type': 'numerical_box',
        'survey_id': test_survey.id,
        'time_limit': 30,
        'title': 'Question 4',
    },
    ])
    # Question 1 matches the survey defaults -> not customized.
    self.assertTrue(question_1.is_time_limited)
    self.assertEqual(question_1.time_limit, 60)
    self.assertFalse(question_1.is_time_customized)
    # Question 2 was created without any time values -> not limited, flagged customized.
    self.assertFalse(question_2.is_time_limited)
    self.assertFalse(question_2.time_limit)
    self.assertTrue(question_2.is_time_customized)
    # Question 3 explicitly opted out of the time limit -> customized, no limit value.
    self.assertFalse(question_3.is_time_limited)
    self.assertTrue(question_3.is_time_customized)
    # Bug fix: this previously re-checked question_2.time_limit (copy-paste),
    # leaving question_3's time_limit unverified.
    self.assertFalse(question_3.time_limit)
    # Question 4 overrides the survey time limit -> customized with its own value.
    self.assertTrue(question_4.is_time_limited)
    self.assertEqual(question_4.time_limit, 30)
    self.assertTrue(question_4.is_time_customized)
def test_survey_time_limits_results(self):
    """Check that speed-related scores awarded are correctly computed."""
    start_time = datetime.datetime(2023, 7, 7, 12, 0, 0)
    test_survey = self.env['survey.survey'].create({
        'title': 'Test This Survey',
        'scoring_type': 'scoring_with_answers',
        'scoring_success_min': 80.0,
        'session_speed_rating': True,
        'session_speed_rating_time_limit': 30,
        'session_question_start_time': start_time,
    })
    # Single question with a customized 60s limit overriding the survey's 30s default.
    q_01 = self.env['survey.question'].create([{
        'is_time_customized': True,
        'is_time_limited': True,
        'question_type': 'simple_choice',
        'suggested_answer_ids': [
            Command.create({'value': 'In Asia', 'answer_score': 5.0, 'is_correct': True}),
            Command.create({'value': 'In Europe', 'answer_score': 0., 'is_correct': False}),
        ],
        'survey_id': test_survey.id,
        'time_limit': 60,
        'title': 'Where is india?',
    }])
    test_survey.session_question_id = q_01
    answer_correct, answer_incorrect = q_01.suggested_answer_ids
    user_input = self.env['survey.user_input'].create({'survey_id': test_survey.id, 'is_session_answer': True})

    # Freeze "now" at several offsets from the session question start time and
    # check the awarded score: incorrect answers always score 0; correct answers
    # score from half (limit elapsed) up to the full 5.0 for near-instant answers.
    for (seconds_since_start, answer), expected_score in zip(
        [
            (61, answer_correct),  # time limit elapsed
            (61, answer_incorrect),
            (31, answer_correct),  # half of time limit elapsed
            (31, answer_incorrect),
            (2, answer_correct),  # end of max_score_delay
            (2, answer_incorrect),
        ], [2.5, 0.0, 3.75, 0.0, 5.0, 0.0],  # 2.5 if succeeded + up to 2.5 depending on time to answer
    ):
        with (self.subTest(elapsed=seconds_since_start, is_correct=answer.is_correct),
              freeze_time(start_time + datetime.timedelta(seconds=seconds_since_start))):
            user_input_line = self.env['survey.user_input.line'].create({
                'user_input_id': user_input.id,
                'question_id': q_01.id,
                'answer_type': 'suggestion',
                'suggested_answer_id': answer.id,
            })
            self.assertEqual(user_input_line.answer_score, expected_score)

View file

@ -0,0 +1,131 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
from odoo import Command
from odoo.addons.survey.tests import common
from odoo.tests.common import HttpCase
class TestSurveyController(common.TestSurveyCommon, HttpCase):
def test_submit_route_scoring_after_page(self):
    """ Check that the submit route for a scoring after page survey is returning the
    accurate correct answers depending on the survey layout and the active page questions.
    The correct answers of the inactive conditional questions shouldn't be returned.
    """
    survey = self.env['survey.survey'].create({
        'title': 'How much do you know about words?',
        'scoring_type': 'scoring_with_answers_after_page',
    })
    # Suggested answers for the three questions (partial / correct / incorrect variants).
    (
        a_q1_partial, a_q1_correct, a_q1_incorrect,
        a_q2_incorrect, a_q2_correct,
        a_q3_correct, a_q3_incorrect
    ) = self.env['survey.question.answer'].create([
        {'value': 'A thing full of letters.', 'answer_score': 1.0},
        {'value': 'A unit of language, [...], carrying a meaning.', 'answer_score': 4.0, 'is_correct': True},
        {'value': 'A thing related to space', 'answer_score': -4.0},
        {'value': 'Yes', 'answer_score': -0.5},
        {'value': 'No', 'answer_score': 0.5, 'is_correct': True},
        {'value': 'Yes', 'answer_score': 0.5, 'is_correct': True},
        {'value': 'No', 'answer_score': 0.2},
    ])
    # q2 is conditional on a partial/incorrect q1 answer; q3 on the correct q1 answer.
    q1, q2, q3 = self.env['survey.question'].create([{
        'survey_id': survey.id,
        'title': 'What is a word?',
        'sequence': 2,
        'question_type': 'simple_choice',
        'suggested_answer_ids': [Command.set((a_q1_partial | a_q1_correct | a_q1_incorrect).ids)],
        'constr_mandatory': False,
    }, {
        'survey_id': survey.id,
        'title': 'Are you sure?',
        'sequence': 3,
        'question_type': 'simple_choice',
        'suggested_answer_ids': [Command.set((a_q2_incorrect | a_q2_correct).ids)],
        'triggering_answer_ids': [Command.set((a_q1_partial | a_q1_incorrect).ids)],
        'constr_mandatory': False,
    }, {
        'survey_id': survey.id,
        'title': 'Are you sure?',
        'sequence': 5,
        'question_type': 'simple_choice',
        'suggested_answer_ids': [Command.set((a_q3_correct | a_q3_incorrect).ids)],
        'triggering_answer_ids': [Command.set([a_q1_correct.id])],
        'constr_mandatory': False,
    }])
    # Pages only created for the 'page_per_section' layout cases below.
    pages = [
        {'is_page': True, 'question_type': False, 'sequence': 1, 'title': 'Page 0', 'survey_id': survey.id},
        {'is_page': True, 'question_type': False, 'sequence': 4, 'title': 'Page 1', 'survey_id': survey.id},
    ]
    q1_correct_answer = {str(q1.id): [a_q1_correct.id]}
    # A case's shape is: (layout, submitted q1 answer, expected correct answers in response).
    cases = [
        ('page_per_question', [], q1_correct_answer),
        ('page_per_question', a_q1_correct, q1_correct_answer),
        ('page_per_question', a_q1_incorrect, q1_correct_answer),
        ('one_page', [], q1_correct_answer),  # skipping gives answers for active questions (q2 and q3 conditional questions are inactive)
        ('one_page', a_q1_correct, {**q1_correct_answer, str(q3.id): [a_q3_correct.id]}),
        ('one_page', a_q1_partial, {**q1_correct_answer, str(q2.id): [a_q2_correct.id]}),
        # page0 contains q1 and q2, page1 contains q3
        ('page_per_section', [], q1_correct_answer),
        ('page_per_section', a_q1_correct, q1_correct_answer),  # no correct answers for q3 because q3 is not on the same page as q1
        ('page_per_section', a_q1_partial, {**q1_correct_answer, str(q2.id): [a_q2_correct.id]}),
    ]
    for case_index, (layout, answer_q1, expected_correct_answers) in enumerate(cases):
        with self.subTest(case_index=case_index, layout=layout):
            survey.questions_layout = layout
            if layout == 'page_per_section':
                page0, _ = self.env['survey.question'].create(pages)

            cookie_key = f'survey_{survey.access_token}'
            # clear the cookie to start a new survey
            self.opener.cookies.pop(cookie_key, None)
            response = self._access_start(survey)
            self.assertTrue(response.history, "Survey start should redirect")
            # The redirect sets a per-survey cookie holding the user_input access token.
            cookie_token = response.history[0].cookies.get(cookie_key)
            user_input = self.env['survey.user_input'].search([('access_token', '=', cookie_token)])
            answer_token = user_input.access_token
            self.assertTrue(cookie_token)
            self.assertTrue(user_input)

            r = self._access_page(survey, answer_token)
            self.assertResponse(r, 200)
            csrf_token = self._find_csrf_token(response.text)

            r = self._access_begin(survey, answer_token)
            self.assertResponse(r, 200)

            post_data = {'csrf_token': csrf_token, 'token': answer_token}
            # Empty list means "skipped"; otherwise post the selected answer id.
            post_data[q1.id] = answer_q1.id if answer_q1 else answer_q1
            if layout == 'page_per_question':
                post_data['question_id'] = q1.id
            elif layout == 'page_per_section':
                post_data['page_id'] = page0.id

            # Submit answers and check the submit route is returning the accurate correct answers
            response = self._access_submit(survey, answer_token, post_data)
            self.assertResponse(response, 200)
            self.assertEqual(response.json()['result'][0], expected_correct_answers)
            user_input.invalidate_recordset()  # TDE note: necessary as lots of sudo in controllers messing with cache
def test_live_session_without_question(self):
    """The live-session manage page ('Thank You' screen) must render without
    crashing even when the survey contains no question at all."""
    self.authenticate(self.survey_manager.login, self.survey_manager.login)
    empty_survey = self.env['survey.survey'].with_user(self.survey_manager).create({
        'title': 'Live Session Survey',
        'access_mode': 'token',
        'users_login_required': False,
        'session_question_start_time': datetime.datetime(2023, 7, 7, 12, 0, 0),
    })

    # Open the session management URL while the survey holds no question.
    response = self.url_open(f'/survey/session/manage/{empty_survey.access_token}')
    self.assertEqual(response.status_code, 200, "Should be able to open live session manage page")

View file

@ -6,7 +6,7 @@ from odoo.tests import tagged
from odoo.tests.common import HttpCase
@tagged('-at_install', 'post_install', 'functional')
@tagged('-at_install', 'post_install', 'functional', 'is_query_count')
class TestSurveyFlow(common.TestSurveyCommon, HttpCase):
def _format_submission_data(self, page, answer_data, additional_post_data):
post_data = {}
@ -95,7 +95,7 @@ class TestSurveyFlow(common.TestSurveyCommon, HttpCase):
page0_q1.id: {'value': ['44.0']},
}
post_data = self._format_submission_data(page_0, answer_data, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': 'next'})
r = self._access_submit(survey, answer_token, post_data)
r = self._access_submit(survey, answer_token, post_data, query_count=45) # ! 45 without `website` (single app CI), 38 `survey+website`, 39 "full" runbot
self.assertResponse(r, 200)
answers.invalidate_recordset() # TDE note: necessary as lots of sudo in controllers messing with cache
@ -113,7 +113,7 @@ class TestSurveyFlow(common.TestSurveyCommon, HttpCase):
page1_q0.id: {'value': [page1_q0.suggested_answer_ids.ids[0], page1_q0.suggested_answer_ids.ids[1]]},
}
post_data = self._format_submission_data(page_1, answer_data, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': 'next'})
r = self._access_submit(survey, answer_token, post_data)
r = self._access_submit(survey, answer_token, post_data, query_count=40) # ! 37 without `website`, 32 `survey+website`, 40 "full" runbot
self.assertResponse(r, 200)
answers.invalidate_recordset() # TDE note: necessary as lots of sudo in controllers messing with cache

View file

@ -41,7 +41,7 @@ class TestSurveyFlowWithConditions(common.TestSurveyCommon, HttpCase):
page_0, 'Question 2', 'simple_choice',
sequence=2,
constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
is_conditional=True, triggering_question_id=q01.id, triggering_answer_id=q01.suggested_answer_ids.filtered(lambda q: q.is_correct).id,
triggering_answer_ids=q01.suggested_answer_ids.filtered(lambda q: q.is_correct),
labels=[
{'value': 'Answer 1'},
{'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
@ -60,11 +60,13 @@ class TestSurveyFlowWithConditions(common.TestSurveyCommon, HttpCase):
{'value': 'Answer 4', 'is_correct': True, 'answer_score': 1.0}
])
q03_suggested_answers_triggering_q04 = q03.suggested_answer_ids.filtered(lambda q: q.is_correct)
self._add_question( # q04
page_0, 'Question 4', 'simple_choice',
sequence=2,
constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
is_conditional=True, triggering_question_id=q03.id, triggering_answer_id=q03.suggested_answer_ids.filtered(lambda q: q.is_correct).id,
triggering_answer_ids=q03_suggested_answers_triggering_q04,
labels=[
{'value': 'Answer 1'},
{'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
@ -87,7 +89,23 @@ class TestSurveyFlowWithConditions(common.TestSurveyCommon, HttpCase):
page_0, 'Question 6', 'simple_choice',
sequence=2,
constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
is_conditional=True, triggering_question_id=q05.id, triggering_answer_id=q05.suggested_answer_ids.filtered(lambda q: q.is_correct).id,
triggering_answer_ids=q05.suggested_answer_ids.filtered(lambda q: q.is_correct),
labels=[
{'value': 'Answer 1'},
{'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
{'value': 'Answer 3'},
{'value': 'Answer 4'}
])
q03_suggested_answers_triggering_q07 = q03.suggested_answer_ids - q03_suggested_answers_triggering_q04
# Make sure to have a case with multiple possible triggers.
self.assertGreater(len(q03_suggested_answers_triggering_q07), 1)
q07 = self._add_question(
page_0, 'Question 7', 'simple_choice',
sequence=2,
constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
triggering_answer_ids=q03_suggested_answers_triggering_q07,
labels=[
{'value': 'Answer 1'},
{'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
@ -101,6 +119,7 @@ class TestSurveyFlowWithConditions(common.TestSurveyCommon, HttpCase):
# -> this should have generated a new user_input with a token
user_inputs = self.env['survey.user_input'].search([('survey_id', '=', survey.id)])
self.assertEqual(len(user_inputs), 1)
self.assertEqual(len(user_inputs.predefined_question_ids), 7)
answer_token = user_inputs.access_token
# User begins survey with first page
@ -117,10 +136,12 @@ class TestSurveyFlowWithConditions(common.TestSurveyCommon, HttpCase):
q03: q03.suggested_answer_ids[0], # Wrong
q05: q05.suggested_answer_ids[3], # Right
q06: q06.suggested_answer_ids[2], # Wrong
q07: q07.suggested_answer_ids[1], # Right
}
self._answer_page(page_0, answers, answer_token, csrf_token)
self.assertEqual(len(user_inputs.predefined_question_ids), 6, "q04 should have been removed as not triggered.")
user_inputs.invalidate_recordset()
self.assertEqual(round(user_inputs.scoring_percentage), 60, "Three right answers out of five (the fourth one is still hidden)")
self.assertEqual(round(user_inputs.scoring_percentage), 67, "Four right answers out of six questions asked.")
self.assertFalse(user_inputs.scoring_success)

View file

@ -7,13 +7,13 @@ from lxml import etree
from odoo import fields, Command
from odoo.addons.survey.tests import common
from odoo.addons.test_mail.tests.common import MailCommon
from odoo.addons.mail.tests.common import MailCase
from odoo.exceptions import UserError
from odoo.tests import Form
from odoo.tests.common import users
class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
class TestSurveyInvite(common.TestSurveyCommon, MailCase):
def setUp(self):
res = super(TestSurveyInvite, self).setUp()
@ -27,7 +27,7 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
# By default, `<field name="emails"/>` is invisible when `survey_users_login_required` is True,
# making it normally impossible to change by the user in the web client by default.
# For tests `test_survey_invite_authentication_nosignup` and `test_survey_invite_token_internal`
tree.xpath('//field[@name="emails"]')[0].attrib.pop('attrs')
tree.xpath('//field[@name="emails"]')[0].attrib.pop('invisible', None)
view.arch = etree.tostring(tree)
return res
@ -37,34 +37,83 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
action = self.survey.action_send_survey()
self.assertEqual(action['res_model'], 'survey.invite')
# Bad cases
surveys = [
# no page
self.env['survey.survey'].create({'title': 'Test survey'}),
# no questions
self.env['survey.survey'].create({'title': 'Test survey', 'question_and_page_ids': [(0, 0, {'is_page': True, 'question_type': False, 'title': 'P0', 'sequence': 1})]}),
# closed
self.env['survey.survey'].with_user(self.survey_manager).create({
'title': 'S0',
bad_cases = [
{}, # empty
{ # no question
'question_and_page_ids': [Command.create({'is_page': True, 'question_type': False, 'title': 'P0', 'sequence': 1})],
}, {
# scored without positive score obtainable
'scoring_type': 'scoring_with_answers',
'question_and_page_ids': [Command.create({'question_type': 'numerical_box', 'title': 'Q0', 'sequence': 1})],
}, {
# scored without positive score obtainable from simple choice
'scoring_type': 'scoring_with_answers',
'question_and_page_ids': [Command.create({
'question_type': 'simple_choice',
'title': 'Q0', 'sequence': 1,
'suggested_answer_ids': [
Command.create({'value': '1', 'answer_score': 0}),
Command.create({'value': '2', 'answer_score': 0}),
],
})],
}, {
# closed
'active': False,
'question_and_page_ids': [
(0, 0, {'is_page': True, 'question_type': False, 'title': 'P0', 'sequence': 1}),
(0, 0, {'title': 'Q0', 'sequence': 2, 'question_type': 'text_box'})
]
})
Command.create({'is_page': True, 'question_type': False, 'title': 'P0', 'sequence': 1}),
Command.create({'title': 'Q0', 'sequence': 2, 'question_type': 'text_box'})
],
},
]
for survey in surveys:
good_cases = [
{
# scored with positive score obtainable
'scoring_type': 'scoring_with_answers',
'question_and_page_ids': [
Command.create({'question_type': 'numerical_box', 'title': 'Q0', 'sequence': 1, 'answer_score': 1}),
],
}, {
# scored with positive score obtainable from simple choice
'scoring_type': 'scoring_with_answers',
'question_and_page_ids': [
Command.create({ # not sufficient
'question_type': 'simple_choice',
'title': 'Q0', 'sequence': 1,
'suggested_answer_ids': [
Command.create({'value': '1', 'answer_score': 0}),
Command.create({'value': '2', 'answer_score': 0}),
],
}),
Command.create({ # sufficient even if not 'is_correct'
'question_type': 'simple_choice',
'title': 'Q1', 'sequence': 2,
'suggested_answer_ids': [
Command.create({'value': '1', 'answer_score': 0}),
Command.create({'value': '2', 'answer_score': 1}),
],
}),
],
},
]
surveys = self.env['survey.survey'].with_user(self.survey_manager).create([
{'title': 'Test survey', **case} for case in bad_cases + good_cases
])
for survey in surveys[:len(bad_cases)]:
with self.assertRaises(UserError):
survey.action_send_survey()
for survey in surveys[len(bad_cases):]:
survey.action_send_survey()
@users('survey_manager')
def test_survey_invite(self):
Answer = self.env['survey.user_input']
deadline = fields.Datetime.now() + relativedelta(months=1)
self.survey.write({'access_mode': 'public', 'users_login_required': False})
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form = Form.from_action(self.env, self.survey.action_send_survey())
invite_form.send_email = True
# some lowlevel checks that action is correctly configured
self.assertEqual(Answer.search([('survey_id', '=', self.survey.id)]), self.env['survey.user_input'])
@ -85,22 +134,21 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
self.assertEqual(set(answers.mapped('deadline')), set([deadline]))
with self.subTest('Warning when inviting an already invited partner'):
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form = Form.from_action(self.env, self.survey.action_send_survey())
invite_form.send_email = True
invite_form.partner_ids.add(self.customer)
self.assertIn(self.customer, invite_form.existing_partner_ids)
self.assertEqual(invite_form.existing_text,
'The following customers have already received an invite: Caroline Customer.')
@users('survey_manager')
def test_survey_invite_authentication_nosignup(self):
Answer = self.env['survey.user_input']
self.survey.write({'access_mode': 'public', 'users_login_required': True})
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form = Form.from_action(self.env, self.survey.action_send_survey())
invite_form.send_email = True
with self.assertRaises(UserError): # do not allow to add customer (partner without user)
invite_form.partner_ids.add(self.customer)
@ -118,7 +166,7 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
self.assertEqual(len(answers), 2)
self.assertEqual(
set(answers.mapped('email')),
set([self.user_emp.email, self.user_portal.email]))
{self.user_emp.email, self.user_portal.email})
self.assertEqual(answers.mapped('partner_id'), self.user_emp.partner_id | self.user_portal.partner_id)
@users('survey_manager')
@ -128,8 +176,8 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
Answer = self.env['survey.user_input']
self.survey.write({'access_mode': 'public', 'users_login_required': True})
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form = Form.from_action(self.env, self.survey.action_send_survey())
invite_form.send_email = True
invite_form.partner_ids.add(self.customer)
invite_form.partner_ids.add(self.user_portal.partner_id)
@ -150,10 +198,14 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
@users('survey_manager')
def test_survey_invite_email_from(self):
# Verifies whether changing the value of the "email_from" field reflects on the receiving end.
# by default avoid rendering restriction complexity
self.env['ir.config_parameter'].sudo().set_param('mail.restrict.template.rendering', False)
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
action['context']['default_send_email'] = True
invite_form = Form.from_action(self.env, action)
invite_form.partner_ids.add(self.survey_user.partner_id)
invite_form.template_id.write({'email_from':'{{ object.partner_id.email_formatted }}'})
invite_form.template_id.write({'email_from': '{{ object.partner_id.email_formatted }}'})
invite = invite_form.save()
with self.mock_mail_gateway():
invite.action_invite()
@ -167,8 +219,8 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
Answer = self.env['survey.user_input']
self.survey.write({'access_mode': 'public', 'users_login_required': False})
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form = Form.from_action(self.env, self.survey.action_send_survey())
invite_form.send_email = True
invite_form.partner_ids.add(self.customer)
invite_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'
@ -188,8 +240,7 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
Answer = self.env['survey.user_input']
self.survey.write({'access_mode': 'token', 'users_login_required': False})
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form = Form.from_action(self.env, self.survey.action_send_survey())
invite_form.partner_ids.add(self.customer)
invite_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'
@ -209,8 +260,7 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
Answer = self.env['survey.user_input']
self.survey.write({'access_mode': 'token', 'users_login_required': True})
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form = Form.from_action(self.env, self.survey.action_send_survey())
with self.assertRaises(UserError): # do not allow to add customer (partner without user)
invite_form.partner_ids.add(self.customer)
@ -249,8 +299,7 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
})
self.survey.write({'access_mode': 'token', 'users_login_required': False})
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form = Form.from_action(self.env, self.survey.action_send_survey())
invite_form.emails = 'test@example.com'
invite = invite_form.save()
invite.action_invite()
@ -295,8 +344,8 @@ class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
]
})
action = user_survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form = Form.from_action(self.env, user_survey.action_send_survey())
invite_form.send_email = True
invite_form.template_id = mail_template
invite_form.emails = 'test_survey_invite_with_template_attachment@odoo.gov'
invite = invite_form.save()

View file

@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.survey.tests import common
from odoo.tests import tagged
from odoo.tests.common import warmup, HttpCase
@tagged('post_install', '-at_install')
class SurveyPerformance(common.TestSurveyResultsCommon, HttpCase):
    """Query-count guards for the '/survey/results' page filter combinations."""

    def _open_results_page(self, filters, expected_query_count):
        # Shared driver: log in as the survey manager, then fetch the results
        # page of ``self.survey`` for the given filter string while counting
        # the SQL queries issued.
        self.authenticate('survey_manager', 'survey_manager')
        with self.assertQueryCount(default=expected_query_count):
            self.url_open(f'/survey/results/{self.survey.id}?filters={filters}')

    @warmup
    def test_survey_results_with_multiple_filters_mixed_model(self):
        """ Check that, in comparison with having filters from the same model,
        having filters from different models needs only a few more queries.
        """
        # cold orm/fields cache (only survey: 26, all module: 23)
        # the extra requests are `_get_default_lang` which is not called when website is installed
        self._open_results_page(f'A,0,{self.gras_id}|L,0,{self.answer_pauline.id}', 26)

    @warmup
    def test_survey_results_with_multiple_filters_question_answer_model(self):
        """ Check that no matter the number of filters, if their answers
        data are stored in the same model (here survey.question.answer)
        the query count stay the same as having a single filter.
        """
        # cold orm/fields cache (only survey: 24, all module: 21)
        # the extra requests are `_get_default_lang` which is not called when website is installed
        self._open_results_page(f'A,0,{self.gras_id}|A,0,{self.cat_id}', 24)

    @warmup
    def test_survey_results_with_one_filter(self):
        # cold orm/fields cache (only survey: 24, all module: 21)
        # the extra requests are `_get_default_lang` which is not called when website is installed
        self._open_results_page(f'A,0,{self.cat_id}', 24)

View file

@ -55,7 +55,7 @@ class TestSurveyRandomize(TransactionCase):
def _add_questions(self, question_and_pages, page, count):
for i in range(count):
question_and_pages |= self.env['survey.question'].sudo().create({
'title': page.title + ' Q' + str(i + 1),
'title': f'{page.title} Q{i + 1}',
'sequence': page.sequence + (i + 1)
})

View file

@ -0,0 +1,168 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo.tests import tagged
from odoo.addons.survey.controllers.main import Survey
from odoo.addons.survey.tests import common
from odoo.addons.http_routing.tests.common import MockRequest
@tagged("is_query_count")
class TestSurveyResults(common.TestSurveyResultsCommon):
    """ Check the results and the performance of the different filters combinations.
    The filters can be combined but their query count doesn't add up if their
    related question answers data are stored in the same model.
    """

    def setUp(self):
        super(TestSurveyResults, self).setUp()
        # Controller instantiated directly so its private helpers
        # (_get_filters_from_post, _extract_filters_data) can be unit-tested
        # without going through an HTTP round-trip.
        self.SurveyController = Survey()

    def test_get_filters_from_post(self):
        """ Check that the filters are correctly retrieved from the post. """
        # Filter token format is '<letter>,<matrix row id or 0>,<record id>',
        # tokens separated by '|'. 'A' targets survey.question.answer records,
        # 'L' targets survey.user_input.line records (see assertions below).
        # Matrix filter | simple_choice or multiple_choice filter | char_box, text_box, numerical_box, date or datetime filter
        post = {'filters': 'A,14,101|A,0,58|L,0,2'}
        with MockRequest(self.env):
            answer_by_column, user_input_lines_ids = self.SurveyController._get_filters_from_post(post)
        # answer_by_column maps answer id -> list of matrix row ids (empty for non-matrix)
        self.assertEqual(answer_by_column, {101: [14], 58: []})
        self.assertEqual(user_input_lines_ids, [2])

        # Multiple matrix filters
        post = {'filters': 'A,14,101|A,20,205'}
        with MockRequest(self.env):
            answer_by_column, user_input_lines_ids = self.SurveyController._get_filters_from_post(post)
        self.assertEqual(answer_by_column, {101: [14], 205: [20]})
        self.assertFalse(user_input_lines_ids)

        # Multiple filters on the same matrix column
        post = {'filters': 'A,14,101|A,20,101'}
        with MockRequest(self.env):
            answer_by_column, user_input_lines_ids = self.SurveyController._get_filters_from_post(post)
        self.assertEqual(answer_by_column, {101: [14, 20]})
        self.assertFalse(user_input_lines_ids)

        # No model associated with the J letter, the second filter should be ignored
        post = {'filters': 'A,0,9|J,40,3'}
        with MockRequest(self.env):
            answer_by_column, user_input_lines_ids = self.SurveyController._get_filters_from_post(post)
        self.assertEqual(answer_by_column, {9: []})
        self.assertFalse(user_input_lines_ids)

    def test_results_page_filters_survey_matrix(self):
        """ Same as 'test_results_page_filters_survey_question_answer_model'
        but with a matrix-type question (additional record involved for the row)
        """
        post = {'filters': f'A,{self.strawberries_row_id},{self.spring_id}'}
        expected_user_input_lines = self.user_input_1.user_input_line_ids + self.user_input_2.user_input_line_ids
        self._check_results_and_query_count(post, expected_user_input_lines, 3)

    def test_results_page_filters_survey_matrix_mixed_models(self):
        """ Same as 'test_results_page_filters_survey_mixed_models'
        but with a matrix-type question (additional record involved for the row)
        """
        post = {'filters': f'A,{self.strawberries_row_id},{self.spring_id}|L,0,{self.answer_pauline.id}'}
        expected_user_input_lines = self.user_input_2.user_input_line_ids
        # Mixing answer models costs 2 extra queries compared to the single-model cases.
        self._check_results_and_query_count(post, expected_user_input_lines, 5)

    def test_results_page_filters_survey_matrix_multiple(self):
        """ Same as 'test_results_page_filters_survey_question_answer_model_multiple'
        but with matrix-type questions (additional records involved for the rows)
        """
        post = {'filters': f'A,{self.strawberries_row_id},{self.spring_id}|A,{self.ficus_row_id},{self.once_a_week_id}'}
        expected_user_input_lines = self.user_input_1.user_input_line_ids
        self._check_results_and_query_count(post, expected_user_input_lines, 3)

    def test_results_page_filters_survey_matrix_multiple_same_column(self):
        """ Same as 'test_results_page_filters_survey_matrix_multiple' but
        checking the case where the same answer id can be related to multiple row ids.
        """
        post = {'filters': f'A,{self.strawberries_row_id},{self.spring_id}|A,{self.apples_row_id},{self.spring_id}'}
        expected_user_input_lines = self.user_input_2.user_input_line_ids
        self._check_results_and_query_count(post, expected_user_input_lines, 3)

    def test_results_page_filters_survey_mixed_models(self):
        """ Check results and performance of matching filters across different answer models
        ('survey.question.answer' and 'survey.user_input.line' models)
        """
        post = {'filters': f'A,0,{self.gras_id}|L,0,{self.answer_pauline.id}'}
        expected_user_input_lines = self.user_input_2.user_input_line_ids
        self._check_results_and_query_count(post, expected_user_input_lines, 5)

    def test_results_page_filters_survey_question_answer_model(self):
        """ Check results and performance of matching one filter using
        the 'survey.question.answer' answer model
        """
        post = {'filters': f'A,0,{self.gras_id}'}
        expected_user_input_lines = self.user_input_1.user_input_line_ids + self.user_input_2.user_input_line_ids
        self._check_results_and_query_count(post, expected_user_input_lines, 3)

    def test_results_page_filters_survey_question_answer_model_multiple(self):
        """ Check results and performance of matching multiple filters using
        the 'survey.question.answer' answer model
        """
        post = {'filters': f'A,0,{self.gras_id}|A,0,{self.cat_id}'}
        expected_user_input_lines = self.user_input_1.user_input_line_ids
        self._check_results_and_query_count(post, expected_user_input_lines, 3)

    def test_results_page_filters_survey_user_input_line_model(self):
        """ Check results and performance of matching one filter using
        the 'survey.user_input.line' answer model
        """
        post = {'filters': f'L,0,{self.answer_24.id}'}
        expected_user_input_lines = self.user_input_1.user_input_line_ids + self.user_input_2.user_input_line_ids
        self._check_results_and_query_count(post, expected_user_input_lines, 3)

    def test_results_page_filters_survey_user_input_line_model_multiple(self):
        """ Check results and performance of matching multiple filters using
        the 'survey.user_input.line' answer model
        """
        post = {'filters': f'L,0,{self.answer_24.id}|L,0,{self.answer_pauline.id}'}
        expected_user_input_lines = self.user_input_2.user_input_line_ids
        self._check_results_and_query_count(post, expected_user_input_lines, 3)

    def test_statistics_scale(self):
        """ Test statistics of scale question.

        NOTE(review): the fixture is assumed to hold two scale answers with
        values 5 and 7 (hence max=7, min=5, average=6) — defined in
        TestSurveyResultsCommon, confirm there if the counts change.
        """
        with MockRequest(self.env):
            # Empty post -> no filter, all user input lines of the survey.
            found_user_input_lines, _ = self.SurveyController._extract_filters_data(self.survey, {})
            data = self.question_scale._prepare_statistics(found_user_input_lines)[0]
            # One table row per possible scale value (0..10), votes only on 5 and 7.
            self.assertEqual(data['table_data'],
                             [{'value': str(value),
                               'suggested_answer': self.env['survey.question.answer'],
                               'count': 1 if value in (5, 7) else 0,
                               'count_text': f"{1 if value in (5, 7) else 0} Votes"}
                              for value in range(11)])
            self.assertEqual(json.loads(data['graph_data']),
                             [{'key': self.question_scale.title,
                               'values': [{'text': str(value),
                                           'count': 1 if value in (5, 7) else 0}
                                          for value in range(11)]}])
            self.assertEqual(data['numerical_max'], 7)
            self.assertEqual(data['numerical_min'], 5)
            self.assertEqual(data['numerical_average'], 6)

            # Test that a skipped value is not interpreted as a 0 value
            self.scale_answer_line_2.write({
                'value_scale': False,
                'skipped': True,
                'answer_type': False,
            })
            data = self.question_scale._prepare_statistics(found_user_input_lines)[0]
            self.assertEqual(data['table_data'],
                             [{'value': str(value),
                               'suggested_answer': self.env['survey.question.answer'],
                               'count': 1 if value == 5 else 0,
                               'count_text': f"{1 if value == 5 else 0} Votes"}
                              for value in range(11)])
            self.assertEqual(data['numerical_max'], 5)
            self.assertEqual(data['numerical_min'], 5)
            self.assertEqual(data['numerical_average'], 5)

    def _check_results_and_query_count(self, post, expected_user_input_lines, expected_query_count):
        """ Check that, depending on the URL filters, the _extract_filters_data method
        is correctly returning the expected user input lines.

        :param dict post: request parameters, e.g. {'filters': 'A,0,<id>|L,0,<id>'}
        :param expected_user_input_lines: survey.user_input.line recordset expected back
        :param int expected_query_count: exact number of SQL queries allowed
        """
        self.env.invalidate_all()  # clear env cache to not impact the query count
        with MockRequest(self.env), self.assertQueryCount(expected_query_count):
            found_user_input_lines, _ = self.SurveyController._extract_filters_data(self.survey, post)
        self.assertEqual(expected_user_input_lines, found_user_input_lines)

View file

@ -4,7 +4,7 @@
import datetime
from odoo.addons.survey.tests import common
from odoo.exceptions import AccessError, UserError
from odoo.exceptions import AccessError, ValidationError
from odoo.tests import tagged
from odoo.tests.common import users, HttpCase
from odoo.tools import mute_logger
@ -140,25 +140,34 @@ class TestAccess(common.TestSurveyCommon):
@mute_logger('odoo.addons.base.models.ir_model')
@users('survey_user')
def test_access_survey_survey_user(self):
# Create: own only
survey = self.env['survey.survey'].create({'title': 'Test Survey 2'})
self.env['survey.question'].create({'title': 'My Page', 'sequence': 0, 'is_page': True, 'question_type': False, 'survey_id': survey.id})
self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'survey_id': survey.id})
# Restrict common survey to survey_manager
restricted_to_other_survey = self.survey
self.assertEqual(self.survey_manager, restricted_to_other_survey.user_id)
restricted_to_other_survey.write({'restrict_user_ids': [[4, restricted_to_other_survey.user_id.id]]})
# Read: all
# Create: restricted to self or no one
unrestricted_survey = self.env['survey.survey'].create({'title': 'Test Survey Unrestricted'})
self.env['survey.question'].create({'title': 'My Page', 'sequence': 0, 'is_page': True, 'question_type': False, 'survey_id': unrestricted_survey.id})
self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'survey_id': unrestricted_survey.id})
restricted_to_self_survey = self.env['survey.survey'].create({'title': 'Test Survey Restricted to Self', 'restrict_user_ids': [[4, self.env.user.id]]})
with self.assertRaises(ValidationError):
self.env['survey.survey'].with_user(self.env.user).create({
'title': 'Test Survey Restricted to Other', 'restrict_user_ids': [[4, restricted_to_other_survey.user_id.id]]})
# Read: restricted to self or no one
surveys = self.env['survey.survey'].search([('title', 'ilike', 'Test')])
self.assertEqual(surveys, self.survey | survey)
self.assertEqual(surveys, restricted_to_self_survey | unrestricted_survey)
surveys.read(['title'])
# Write: own only
survey.write({'title': 'New Title'})
# Write: restricted to self or no one
(unrestricted_survey + restricted_to_self_survey).write({'title': 'New Title'})
with self.assertRaises(AccessError):
self.survey.with_user(self.env.user).write({'title': 'New Title'})
restricted_to_other_survey.with_user(self.env.user).write({'title': 'New Title'})
# Unlink: own only
survey.unlink()
# Unlink: restricted to self or no one
(unrestricted_survey + restricted_to_self_survey).unlink()
with self.assertRaises(AccessError):
self.survey.with_user(self.env.user).unlink()
restricted_to_other_survey.with_user(self.env.user).unlink()
@mute_logger('odoo.addons.base.models.ir_model')
@users('user_emp')
@ -254,12 +263,12 @@ class TestAccess(common.TestSurveyCommon):
self.env['survey.question'].create({'title': 'Other', 'sequence': 0, 'is_page': True, 'question_type': False, 'survey_id': survey_own.id})
question_own = self.env['survey.question'].create({'title': 'Other Question', 'sequence': 1, 'survey_id': survey_own.id})
# Create: own survey only
# Create: unrestricted survey
answer_own = self.env['survey.user_input'].create({'survey_id': survey_own.id})
with self.assertRaises(AccessError):
self.env['survey.user_input.line'].create({'question_id': question_own.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': answer_own.id})
# Read: always
# Read: restricted to self or no one
answers = self.env['survey.user_input'].search([('survey_id', 'in', [survey_own.id, self.survey.id])])
self.assertEqual(answers, answer_own | self.answer_0)
@ -268,21 +277,27 @@ class TestAccess(common.TestSurveyCommon):
self.env['survey.user_input'].browse(answer_own.ids).read(['state'])
self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
# Create: own survey only (moved after read because DB not correctly rollbacked with assertRaises)
self.survey.write({'restrict_user_ids': [[4, self.survey.user_id.id]]})
with self.assertRaises(AccessError):
self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
with self.assertRaises(AccessError):
self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
# Create: in restricted users survey (moved after read because DB not correctly rollbacked with assertRaises)
self.survey.write({'restrict_user_ids': [[4, self.survey.user_id.id]]})
with self.assertRaises(AccessError):
answer_other = self.env['survey.user_input'].create({'survey_id': self.survey.id})
with self.assertRaises(AccessError):
answer_line_other = self.env['survey.user_input.line'].create({'question_id': self.question_num.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': self.answer_0.id})
# Write: own survey only
# Write: unrestricted survey or in restricted users
answer_own.write({'state': 'done'})
with self.assertRaises(AccessError):
self.answer_0.with_user(self.env.user).write({'state': 'done'})
# Unlink: own survey only
# Unlink: unrestricted survey or in restricted users
answer_own.unlink()
with self.assertRaises(AccessError):
self.answer_0.with_user(self.env.user).unlink()
@ -359,53 +374,3 @@ class TestSurveySecurityControllers(common.TestSurveyCommon, HttpCase):
self.survey.write({'session_state': False, 'active': True})
response = self.url_open('/s/123456')
self.assertFalse(self.survey.title in response.text)
def test_print_survey_access_mode_token(self):
"""Check that a survey with access_mode=token with questions defined can always be printed."""
# Case: No questions, no answers -> general print informs the user "your survey is empty"
survey = self.env['survey.survey'].with_user(self.survey_manager).create({
'title': 'Test Survey without answers',
'access_mode': 'token',
'users_login_required': False,
'users_can_go_back': False,
})
self.authenticate(self.survey_manager.login, self.survey_manager.login)
response = self.url_open(f'/survey/print/{survey.access_token}')
self.assertEqual(response.status_code, 200,
"Print request to shall succeed for a survey without questions nor answers")
self.assertIn("survey is empty", str(response.content),
"Survey print without questions nor answers should inform user that the survey is empty")
# Case: a question, no answers -> general print shows the question
question = self.env['survey.question'].with_user(self.survey_manager).create({
'title': 'Test Question',
'survey_id': survey.id,
'sequence': 1,
'is_page': False,
'question_type': 'char_box',
})
response = self.url_open(f'/survey/print/{survey.access_token}')
self.assertEqual(response.status_code, 200,
"Print request to shall succeed for a survey with questions but no answers")
self.assertIn(question.title, str(response.content),
"Should be possible to print a survey with a question and without answers")
# Case: a question, an answers -> general print shows the question
user_input = self._add_answer(survey, self.survey_manager.partner_id, state='done')
self._add_answer_line(question, user_input, "Test Answer")
response = self.url_open(f'/survey/print/{survey.access_token}')
self.assertEqual(response.status_code, 200,
"Print request without answer token, should be possible for a survey with questions and answers")
self.assertIn(question.title, str(response.content),
"Survey question should be visible in general print, even when answers exist and no answer_token is provided")
self.assertNotIn("Test Answer", str(response.content),
"Survey answer should not be in general print, when no answer_token is provided")
# Case: a question, an answers -> print with answer_token shows both
response = self.url_open(f'/survey/print/{survey.access_token}?answer_token={user_input.access_token}')
self.assertEqual(response.status_code, 200,
"Should be possible to print a sruvey with questions and answers")
self.assertIn(question.title, str(response.content),
"Question should appear when printing survey with using an answer_token")
self.assertIn("Test Answer", str(response.content),
"Answer should appear when printing survey with using an answer_token")

View file

@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import HttpCase, tagged
@tagged('post_install', '-at_install')
class TestUi(HttpCase):
    """Backend UI tests for the survey form view; tours run post-install only."""

    def test_tour_test_survey_form_triggers(self):
        """Run the 'survey_tour_test_survey_form_triggers' JS tour as admin,
        starting from the backend web client ('/odoo')."""
        self.start_tour('/odoo', 'survey_tour_test_survey_form_triggers', login='admin')

View file

@ -1,7 +1,7 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
from odoo import Command
from odoo.addons.base.tests.common import HttpCaseWithUserDemo
@ -26,17 +26,17 @@ class TestUiFeedback(HttpCaseWithUserDemo):
'is_page': True,
'description': """<p>This section is about general information about you. Answering them helps qualifying your answers.</p>""",
}), (0, 0, {
'title': 'Where do you live ?',
'title': 'Where do you live?',
'sequence': 2,
'question_type': 'char_box',
'constr_mandatory': False,
}), (0, 0, {
'title': 'When is your date of birth ?',
'title': 'When is your date of birth?',
'sequence': 3,
'question_type': 'date',
'description': False,
}), (0, 0, {
'title': 'How frequently do you buy products online ?',
'title': 'How frequently do you buy products online?',
'sequence': 4,
'question_type': 'simple_choice',
'comments_allowed': True,
@ -60,7 +60,7 @@ class TestUiFeedback(HttpCaseWithUserDemo):
'sequence': 5,
})],
}), (0, 0, {
'title': 'How many times did you order products on our website ?',
'title': 'How many times did you order products on our website?',
'sequence': 5,
'question_type': 'numerical_box',
'constr_mandatory': True,
@ -71,7 +71,7 @@ class TestUiFeedback(HttpCaseWithUserDemo):
'question_type': False,
'description': """<p>This section is about our eCommerce experience itself.</p>""",
}), (0, 0, {
'title': 'Which of the following words would you use to describe our products ?',
'title': 'Which of the following words would you use to describe our products?',
'sequence': 7,
'question_type': 'multiple_choice',
'constr_mandatory': True,
@ -107,7 +107,7 @@ class TestUiFeedback(HttpCaseWithUserDemo):
'sequence': 9,
})],
}), (0, 0, {
'title': 'What do your think about our new eCommerce ?',
'title': 'What do your think about our new eCommerce?',
'sequence': 8,
'question_type': 'matrix',
'matrix_subtype': 'multiple',
@ -142,15 +142,24 @@ class TestUiFeedback(HttpCaseWithUserDemo):
'sequence': 5,
})],
}), (0, 0, {
'title': 'Do you have any other comments, questions, or concerns ?',
'title': 'Do you have any other comments, questions, or concerns?',
'sequence': 9,
'question_type': 'text_box',
'constr_mandatory': False,
})
}), (0, 0, {
'title': 'How would you rate your experience on our website?',
'sequence': 15,
'question_type': 'scale',
'scale_min': 1,
'scale_max': 5,
'scale_min_label': 'Bad experience',
'scale_mid_label': 'Do the job',
'scale_max_label': 'Very good experience',
'constr_mandatory': True,
}),
],
})
def test_01_admin_survey_tour(self):
    """Run the 'test_survey' feedback tour while logged in as admin."""
    self.start_tour(
        "/survey/start/%s" % self.survey_feedback.access_token,
        'test_survey',
        login="admin",
    )
@ -159,9 +168,18 @@ class TestUiFeedback(HttpCaseWithUserDemo):
access_token = self.survey_feedback.access_token
self.start_tour("/survey/start/%s" % access_token, 'test_survey', login="demo")
def test_03_public_survey_tour(self):
def test_03_public_multilingual_survey_tour(self):
# Setup survey translation
self.assertEqual([lang[0] for lang in self.env['res.lang'].get_installed()], ['en_US'])
self.env['res.lang']._activate_lang('fr_BE')
self.survey_feedback_fr = self.survey_feedback.with_context(lang='fr_BE')
self.survey_feedback_fr.title = "Enquête de satisfaction"
for survey_item in self.survey_feedback_fr.question_and_page_ids:
survey_item.title = f"FR: {survey_item.with_context(lang='en_US').title}"
self.survey_feedback.lang_ids = self.env['res.lang'].search([('code', 'in', ['fr_BE', 'en_US'])])
access_token = self.survey_feedback.access_token
self.start_tour("/survey/start/%s" % access_token, 'test_survey')
self.start_tour("/survey/start/%s" % access_token, 'test_survey_multilang')
def test_04_public_survey_with_triggers(self):
""" Check that chained conditional questions are correctly
@ -187,82 +205,155 @@ class TestUiFeedback(HttpCaseWithUserDemo):
'sequence': 1,
'question_type': 'simple_choice',
'suggested_answer_ids': [
(0, 0, {
'value': 'Answer 1',
'sequence': 1,
}), (0, 0, {
'value': 'Answer 2',
'sequence': 2,
}), (0, 0, {
'value': 'Answer 3',
'sequence': 3,
})
Command.create({'value': 'Answer 1'}),
Command.create({'value': 'Answer 2'}),
Command.create({'value': 'Answer 3'}),
],
'constr_mandatory': True,
}), (0, 0, {
}), Command.create({
'title': 'Q2',
'sequence': 2,
'question_type': 'simple_choice',
'suggested_answer_ids': [
(0, 0, {
'value': 'Answer 1',
'sequence': 1,
}), (0, 0, {
'value': 'Answer 2',
'sequence': 2,
})
Command.create({'value': 'Answer 1'}),
Command.create({'value': 'Answer 2'}),
],
'is_conditional': True,
'constr_mandatory': True,
}), (0, 0, {
}), Command.create({
'title': 'Q3',
'sequence': 3,
'question_type': 'simple_choice',
'suggested_answer_ids': [
(0, 0, {
'value': 'Answer 1',
'sequence': 1,
}), (0, 0, {
'value': 'Answer 2',
'sequence': 2,
})
Command.create({'value': 'Answer 1'}),
Command.create({'value': 'Answer 2'}),
],
'is_conditional': True,
'constr_mandatory': True,
}), (0, 0, {
}), Command.create({
'title': 'Q4',
'sequence': 4,
'question_type': 'numerical_box',
'is_conditional': True,
'constr_mandatory': True,
}), (0, 0, {
'title': 'Q5',
'sequence': 5,
'question_type': 'numerical_box',
'is_conditional': True,
})
]
})
q1, q2, q3, q4, q5 = survey_with_triggers.question_and_page_ids
q1_a1, q1_a2, __ = q1.suggested_answer_ids
q1, q2, q3, q4 = survey_with_triggers.question_and_page_ids
q1_a1, __, q1_a3 = q1.suggested_answer_ids
q2_a1 = q2.suggested_answer_ids[0]
q2.triggering_question_id = q1
q2.triggering_answer_id = q1_a1
q3.triggering_question_id = q2
q3.triggering_answer_id = q2_a1
q4.triggering_question_id = q1
q4.triggering_answer_id = q1_a2
q5.triggering_question_id = q1
q5.triggering_answer_id = q1_a2
q2.triggering_answer_ids = q1_a1
q3.triggering_answer_ids = q1_a3 | q2_a1
q4.triggering_answer_ids = q1_a1
access_token = survey_with_triggers.access_token
self.start_tour("/survey/start/%s" % access_token, 'test_survey_chained_conditional_questions')
def test_05_public_survey_with_trigger_on_different_page(self):
    """Check that conditional questions are shown when triggered from a different page too.

    Layout (page_per_section): Section 1 holds Q1; Section 2 holds Q2 and Q3.
    Q3 is conditional and must appear once 'Answer 1' is selected on Q1
    (previous page) or on Q2 (same page). The flow is then exercised by the
    'test_survey_conditional_question_on_different_page' JS tour.
    """
    survey_with_trigger_on_different_page = self.env['survey.survey'].create({
        'title': 'Survey With Trigger on a different page',
        'access_token': '1cb935bd-2399-4ed1-9e10-c649318fb4dc',
        'access_mode': 'public',
        'users_can_go_back': True,
        'questions_layout': 'page_per_section',
        'description': "<p>Test survey with conditional questions triggered from a previous section</p>",
        'question_and_page_ids': [
            Command.create({
                'title': 'Section 1',
                'is_page': True,
                'sequence': 1,
                'question_type': False,
            }), Command.create({
                'title': 'Q1',
                'sequence': 2,
                'question_type': 'simple_choice',
                'suggested_answer_ids': [
                    Command.create({'value': 'Answer 1'}),
                    Command.create({'value': 'Answer 2'}),
                    Command.create({'value': 'Answer 3'}),
                ],
                'constr_mandatory': False,
            }), Command.create({
                'title': 'Section 2',
                'is_page': True,
                'sequence': 3,
                'question_type': False,
            }), Command.create({
                'title': 'Q2',
                'sequence': 4,
                'question_type': 'simple_choice',
                'suggested_answer_ids': [
                    Command.create({'value': 'Answer 1'}),
                    Command.create({'value': 'Answer 2'}),
                ],
                'constr_mandatory': False,
            }), Command.create({
                'title': 'Q3',
                # Fix: was sequence 3, which collided with Section 2's sequence
                # and ordered the conditional Q3 before its same-page trigger Q2
                # (sequence 4). Q3 must come after every triggering answer.
                'sequence': 5,
                'question_type': 'numerical_box',
                'constr_mandatory': False,
            }),
        ]
    })
    q1 = survey_with_trigger_on_different_page.question_ids.filtered(lambda q: q.title == 'Q1')
    q1_a1 = q1.suggested_answer_ids.filtered(lambda a: a.value == 'Answer 1')
    q2 = survey_with_trigger_on_different_page.question_ids.filtered(lambda q: q.title == 'Q2')
    q2_a1 = q2.suggested_answer_ids.filtered(lambda a: a.value == 'Answer 1')
    q3 = survey_with_trigger_on_different_page.question_ids.filtered(lambda q: q.title == 'Q3')
    # Q3 is triggered by answers living on two different pages.
    q3.triggering_answer_ids = q1_a1 | q2_a1
    access_token = survey_with_trigger_on_different_page.access_token
    self.start_tour("/survey/start/%s" % access_token, 'test_survey_conditional_question_on_different_page')
def test_06_survey_prefill(self):
    """Run the 'test_survey_prefill' tour on the feedback survey (public access)."""
    start_url = "/survey/start/%s" % self.survey_feedback.access_token
    self.start_tour(start_url, 'test_survey_prefill')
def test_07_survey_roaming_mandatory_questions(self):
    """Run the roaming tour on a one-question-per-page survey where every
    question is a mandatory simple choice and back-navigation is allowed."""

    def mandatory_choice(title, sequence, answer_values):
        # Build one mandatory simple-choice question create-command.
        return Command.create({
            'title': title,
            'sequence': sequence,
            'question_type': 'simple_choice',
            'constr_mandatory': True,
            'suggested_answer_ids': [
                Command.create({'value': value}) for value in answer_values
            ],
        })

    survey_with_mandatory_questions = self.env['survey.survey'].create({
        'title': 'Survey With Mandatory questions',
        'access_token': '853ebb30-40f2-43bf-a95a-bbf0e367a365',
        'access_mode': 'public',
        'users_can_go_back': True,
        'questions_layout': 'page_per_question',
        'description': "<p>Test survey with roaming freely option and mandatory questions</p>",
        'question_and_page_ids': [
            mandatory_choice('Q1', 1, ('Answer 1', 'Answer 2', 'Answer 3')),
            mandatory_choice('Q2', 2, ('Answer 1', 'Answer 2', 'Answer 3')),
            mandatory_choice('Q3', 3, ('Answer 1', 'Answer 2')),
        ],
    })
    access_token = survey_with_mandatory_questions.access_token
    self.start_tour("/survey/start/%s" % access_token, 'test_survey_roaming_mandatory_questions')