Initial commit: Core packages

This commit is contained in:
Ernad Husremovic 2025-08-29 15:20:45 +02:00
commit 12c29a983b
9512 changed files with 8379910 additions and 0 deletions

View file

@ -0,0 +1,15 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import common
from . import test_survey
from . import test_survey_flow
from . import test_survey_flow_with_conditions
from . import test_certification_flow
from . import test_survey_invite
from . import test_survey_security
from . import test_survey_randomize
from . import test_survey_ui_certification
from . import test_survey_ui_feedback
from . import test_survey_compute_pages_questions
from . import test_certification_badge

View file

@ -0,0 +1,316 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from collections import Counter
from contextlib import contextmanager
from odoo.addons.mail.tests.common import mail_new_test_user
from odoo.tests import common
class SurveyCase(common.TransactionCase):

    def setUp(self):
        """Prepare the question-type matching table used by the helpers.

        ``_type_match`` maps each question type to a tuple
        ``(answer type, answer field name)`` on ``survey.user_input.line``;
        the matrix entry carries a pair of field names (column, row).
        """
        super(SurveyCase, self).setUp()
        # NOTE(review): the 'suggestion' entries were flagged "still unclear"
        # by the original author (TDE); kept verbatim.
        self._type_match = {
            'text_box': ('text_box', 'value_text_box'),
            'char_box': ('char_box', 'value_char_box'),
            'numerical_box': ('numerical_box', 'value_numerical_box'),
            'date': ('date', 'value_date'),
            'datetime': ('datetime', 'value_datetime'),
            'simple_choice': ('suggestion', 'suggested_answer_id'),
            'multiple_choice': ('suggestion', 'suggested_answer_id'),
            'matrix': ('suggestion', ('suggested_answer_id', 'matrix_row_id')),
        }
    # ------------------------------------------------------------
    # ASSERTS
    # ------------------------------------------------------------

    def assertAnswer(self, answer, state, page):
        """Assert the user input is in ``state`` and last displayed ``page``."""
        self.assertEqual(answer.state, state)
        self.assertEqual(answer.last_displayed_page_id, page)

    def assertAnswerLines(self, page, answer, answer_data):
        """ Check answer lines.

        :param page: survey page whose lines are checked
        :param answer: ``survey.user_input`` record holding the lines
        :param dict answer_data:
            key = question ID
            value = {'value': [user input]}
        """
        lines = answer.user_input_line_ids.filtered(lambda l: l.page_id == page)
        # one stored line is expected per individual submitted value
        answer_count = sum(len(user_input['value']) for user_input in answer_data.values())
        self.assertEqual(len(lines), answer_count)
        for qid, user_input in answer_data.items():
            answer_lines = lines.filtered(lambda l: l.question_id.id == qid)
            question = answer_lines[0].question_id  # TDE note: might have several answers for a given question
            if question.question_type == 'multiple_choice':
                # several lines per question: compare as multisets of suggestion ids
                values = user_input['value']
                answer_fname = self._type_match[question.question_type][1]
                self.assertEqual(
                    Counter(getattr(line, answer_fname).id for line in answer_lines),
                    Counter(values))
            elif question.question_type == 'simple_choice':
                [value] = user_input['value']
                answer_fname = self._type_match[question.question_type][1]
                self.assertEqual(getattr(answer_lines, answer_fname).id, value)
            elif question.question_type == 'matrix':
                # expected value is a (column suggestion id, row id) pair
                [value_col, value_row] = user_input['value']
                answer_fname_col = self._type_match[question.question_type][1][0]
                answer_fname_row = self._type_match[question.question_type][1][1]
                self.assertEqual(getattr(answer_lines, answer_fname_col).id, value_col)
                self.assertEqual(getattr(answer_lines, answer_fname_row).id, value_row)
            else:
                [value] = user_input['value']
                answer_fname = self._type_match[question.question_type][1]
                if question.question_type == 'numerical_box':
                    # numerical answers are stored as floats
                    self.assertEqual(getattr(answer_lines, answer_fname), float(value))
                else:
                    self.assertEqual(getattr(answer_lines, answer_fname), value)

    def assertResponse(self, response, status_code, text_bits=None):
        """Assert the HTTP ``status_code`` and that each text bit appears in the body."""
        self.assertEqual(response.status_code, status_code)
        for text in text_bits or []:
            self.assertIn(text, response.text)
    # ------------------------------------------------------------
    # DATA CREATION
    # ------------------------------------------------------------

    def _add_question(self, page, name, qtype, **kwargs):
        """ Create a ``survey.question`` of type ``qtype`` titled ``name``.

        :param page: survey page record used to compute a default sequence;
            may be None when an explicit ``sequence`` kwarg is given
        :param str name: question title
        :param str qtype: question type ('simple_choice', 'matrix', ...)
        :param kwargs: 'labels' / 'labels_2' give suggested answers (and matrix
            rows) for choice/matrix questions; leftover keys are written on the
            created record as-is
        """
        constr_mandatory = kwargs.pop('constr_mandatory', True)
        constr_error_msg = kwargs.pop('constr_error_msg', 'TestError')
        sequence = kwargs.pop('sequence', False)
        if not sequence:
            # place the question right after the page's last question (or the page itself)
            sequence = page.question_ids[-1].sequence + 1 if page.question_ids else page.sequence + 1
        base_qvalues = {
            'sequence': sequence,
            'title': name,
            'question_type': qtype,
            'constr_mandatory': constr_mandatory,
            'constr_error_msg': constr_error_msg,
        }
        if qtype in ('simple_choice', 'multiple_choice'):
            # choice questions: suggested answers with optional score / correctness
            base_qvalues['suggested_answer_ids'] = [
                (0, 0, {
                    'value': label['value'],
                    'answer_score': label.get('answer_score', 0),
                    'is_correct': label.get('is_correct', False)
                }) for label in kwargs.pop('labels')
            ]
        elif qtype == 'matrix':
            # matrix questions: 'labels' are the columns, 'labels_2' the rows
            base_qvalues['matrix_subtype'] = kwargs.pop('matrix_subtype', 'simple')
            base_qvalues['suggested_answer_ids'] = [
                (0, 0, {'value': label['value'], 'answer_score': label.get('answer_score', 0)})
                for label in kwargs.pop('labels')
            ]
            base_qvalues['matrix_row_ids'] = [
                (0, 0, {'value': label['value'], 'answer_score': label.get('answer_score', 0)})
                for label in kwargs.pop('labels_2')
            ]
        else:
            pass
        base_qvalues.update(kwargs)
        question = self.env['survey.question'].create(base_qvalues)
        return question

    def _add_answer(self, survey, partner, **kwargs):
        """ Create a ``survey.user_input`` on ``survey``, optionally bound to ``partner``. """
        base_avals = {
            'survey_id': survey.id,
            'partner_id': partner.id if partner else False,
            'email': kwargs.pop('email', False),
        }
        base_avals.update(kwargs)
        return self.env['survey.user_input'].create(base_avals)

    def _add_answer_line(self, question, answer, answer_value, **kwargs):
        """ Create a ``survey.user_input.line`` storing ``answer_value`` for ``question``.

        Answer type and storage field default to the ``_type_match`` entry of
        the question type; for matrix questions the column field is used, and
        an optional ``answer_value_row`` kwarg fills the row field.
        """
        qtype = self._type_match.get(question.question_type, (False, False))
        answer_type = kwargs.pop('answer_type', qtype[0])
        answer_fname = kwargs.pop('answer_fname', qtype[1])
        if question.question_type == 'matrix':
            # matrix answers store the column suggestion in the first mapped field
            answer_fname = qtype[1][0]
        base_alvals = {
            'user_input_id': answer.id,
            'question_id': question.id,
            'skipped': False,
            'answer_type': answer_type,
        }
        base_alvals[answer_fname] = answer_value
        if 'answer_value_row' in kwargs:
            answer_value_row = kwargs.pop('answer_value_row')
            base_alvals[qtype[1][1]] = answer_value_row
        base_alvals.update(kwargs)
        return self.env['survey.user_input.line'].create(base_alvals)
    # ------------------------------------------------------------
    # UTILS / CONTROLLER ENDPOINTS FLOWS
    # ------------------------------------------------------------

    def _access_start(self, survey):
        """ GET the public survey start page. """
        return self.url_open('/survey/start/%s' % survey.access_token)

    def _access_page(self, survey, token):
        """ GET the survey filling page of the user input identified by ``token``. """
        return self.url_open('/survey/%s/%s' % (survey.access_token, token))

    def _access_begin(self, survey, token):
        """ POST to /survey/begin to start the answering session. """
        url = survey.get_base_url() + '/survey/begin/%s/%s' % (survey.access_token, token)
        return self.opener.post(url=url, json={})

    def _access_submit(self, survey, token, post_data):
        """ POST ``post_data`` to /survey/submit (JSON-RPC style 'params' wrapper). """
        url = survey.get_base_url() + '/survey/submit/%s/%s' % (survey.access_token, token)
        return self.opener.post(url=url, json={'params': post_data})
def _find_csrf_token(self, text):
csrf_token_re = re.compile("(input.+csrf_token.+value=\")([a-f0-9]{40}o[0-9]*)", re.MULTILINE)
return csrf_token_re.search(text).groups()[1]
def _prepare_post_data(self, question, answers, post_data):
values = answers if isinstance(answers, list) else [answers]
if question.question_type == 'multiple_choice':
for value in values:
value = str(value)
if question.id in post_data:
if isinstance(post_data[question.id], list):
post_data[question.id].append(value)
else:
post_data[question.id] = [post_data[question.id], value]
else:
post_data[question.id] = value
else:
[values] = values
post_data[question.id] = str(values)
return post_data
    def _answer_question(self, question, answer, answer_token, csrf_token, button_submit='next'):
        """ Submit ``answer`` for one ``question`` then reload the survey page.

        :param button_submit: 'next' to move forward, 'previous' to go back
            (the latter only works on surveys allowing it)
        """
        # Employee submits the question answer
        post_data = self._format_submission_data(question, answer, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': button_submit})
        response = self._access_submit(question.survey_id, answer_token, post_data)
        self.assertResponse(response, 200)
        # Employee is redirected on next question
        response = self._access_page(question.survey_id, answer_token)
        self.assertResponse(response, 200)

    def _answer_page(self, page, answers, answer_token, csrf_token):
        """ Submit one answer per question of ``page``.

        :param dict answers: {question record: answer record}
        """
        post_data = {}
        for question, answer in answers.items():
            post_data[question.id] = answer.id
        post_data['page_id'] = page.id
        post_data['csrf_token'] = csrf_token
        post_data['token'] = answer_token
        response = self._access_submit(page.survey_id, answer_token, post_data)
        self.assertResponse(response, 200)
        response = self._access_page(page.survey_id, answer_token)
        self.assertResponse(response, 200)

    def _format_submission_data(self, question, answer, additional_post_data):
        """ Build the /survey/submit payload for ``question`` / ``answer``. """
        post_data = {}
        post_data['question_id'] = question.id
        post_data.update(self._prepare_post_data(question, answer, post_data))
        if question.page_id:
            post_data['page_id'] = question.page_id.id
        post_data.update(**additional_post_data)
        return post_data
    # ------------------------------------------------------------
    # UTILS / TOOLS
    # ------------------------------------------------------------

    def _assert_skipped_question(self, question, survey_user):
        """ Assert ``question`` appears in statistics as skipped exactly once. """
        statistics = question._prepare_statistics(survey_user.user_input_line_ids)
        # find the statistics entry of this question; False when absent
        question_data = next(
            (question_data
             for question_data in statistics
             if question_data.get('question') == question),
            False
        )
        self.assertTrue(bool(question_data))
        self.assertEqual(len(question_data.get('answer_input_skipped_ids')), 1)

    def _create_one_question_per_type(self):
        """ Create one 'Q0' question on ``page_0`` for every selectable question type. """
        all_questions = self.env['survey.question']
        for (question_type, dummy) in self.env['survey.question']._fields['question_type'].selection:
            kwargs = {}
            # choice/matrix types require suggested answers (plus rows for matrix)
            if question_type == 'multiple_choice':
                kwargs['labels'] = [{'value': 'MChoice0'}, {'value': 'MChoice1'}]
            elif question_type == 'simple_choice':
                kwargs['labels'] = [{'value': 'SChoice0'}, {'value': 'SChoice1'}]
            elif question_type == 'matrix':
                kwargs['labels'] = [{'value': 'Column0'}, {'value': 'Column1'}]
                kwargs['labels_2'] = [{'value': 'Row0'}, {'value': 'Row1'}]
            all_questions |= self._add_question(self.page_0, 'Q0', question_type, **kwargs)
        return all_questions
class TestSurveyCommon(SurveyCase):

    def setUp(self):
        """ Create test data: a survey with some pre-defined questions and various test users for ACL """
        super(TestSurveyCommon, self).setUp()

        # -- users covering the different survey access levels --
        self.survey_manager = mail_new_test_user(
            self.env, name='Gustave Doré', login='survey_manager', email='survey.manager@example.com',
            groups='survey.group_survey_manager,base.group_user'
        )
        self.survey_user = mail_new_test_user(
            self.env, name='Lukas Peeters', login='survey_user', email='survey.user@example.com',
            groups='survey.group_survey_user,base.group_user'
        )
        self.user_emp = mail_new_test_user(
            self.env, name='Eglantine Employee', login='user_emp', email='employee@example.com',
            groups='base.group_user', password='user_emp'
        )
        self.user_portal = mail_new_test_user(
            self.env, name='Patrick Portal', login='user_portal', email='portal@example.com',
            groups='base.group_portal'
        )
        self.user_public = mail_new_test_user(
            self.env, name='Pauline Public', login='user_public', email='public@example.com',
            groups='base.group_public'
        )

        # -- a customer partner with no user --
        self.customer = self.env['res.partner'].create({
            'name': 'Caroline Customer',
            'email': 'customer@example.com',
        })

        # -- a public survey holding one page and two questions --
        Survey = self.env['survey.survey'].with_user(self.survey_manager)
        Question = self.env['survey.question'].with_user(self.survey_manager)
        self.survey = Survey.create({
            'title': 'Test Survey',
            'access_mode': 'public',
            'users_login_required': True,
            'users_can_go_back': False,
        })
        self.page_0 = Question.create({
            'title': 'First page',
            'survey_id': self.survey.id,
            'sequence': 1,
            'is_page': True,
            'question_type': False,
        })
        self.question_ft = Question.create({
            'title': 'Test Free Text',
            'survey_id': self.survey.id,
            'sequence': 2,
            'question_type': 'text_box',
        })
        self.question_num = Question.create({
            'title': 'Test NUmerical Box',
            'survey_id': self.survey.id,
            'sequence': 3,
            'question_type': 'numerical_box',
        })

View file

@ -0,0 +1,200 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.survey.tests import common
from psycopg2 import IntegrityError
from odoo.exceptions import AccessError
from odoo.tools import mute_logger
class TestCertificationBadge(common.TestSurveyCommon):
    """ Checks synchronization between certification surveys and their
    gamification badges: challenges, badge uniqueness, archival and ACLs. """

    def setUp(self):
        super(TestCertificationBadge, self).setUp()
        # two independent certification surveys (scored, login required)
        self.certification_survey = self.env['survey.survey'].with_user(self.survey_manager).create({
            'title': 'Certification Survey',
            'access_mode': 'public',
            'users_login_required': True,
            'scoring_type': 'scoring_with_answers',
            'certification': True,
        })
        self.certification_survey_2 = self.env['survey.survey'].with_user(self.survey_manager).create({
            'title': 'Another Certification Survey',
            'access_mode': 'public',
            'users_login_required': True,
            'scoring_type': 'scoring_with_answers',
            'certification': True,
        })
        # three badges nobody can grant manually ('nobody' auth rule)
        self.certification_badge = self.env['gamification.badge'].with_user(self.survey_manager).create({
            'name': self.certification_survey.title,
            'description': 'Congratulations, you have succeeded this certification',
            'rule_auth': 'nobody',
            'level': None,
        })
        self.certification_badge_2 = self.env['gamification.badge'].with_user(self.survey_manager).create({
            'name': self.certification_survey.title + ' 2',
            'description': 'Congratulations, you have succeeded this certification',
            'rule_auth': 'nobody',
            'level': None,
        })
        self.certification_badge_3 = self.env['gamification.badge'].with_user(self.survey_manager).create({
            'name': self.certification_survey.title + ' 3',
            'description': 'Congratulations, you have succeeded this certification',
            'rule_auth': 'nobody',
            'level': None,
        })
def test_archive(self):
""" Archive status of survey is propagated to its badges. """
self.certification_survey.write({
'certification_give_badge': True,
'certification_badge_id': self.certification_badge.id
})
self.certification_survey.action_archive()
self.assertFalse(self.certification_survey.active)
self.assertFalse(self.certification_badge.active)
self.certification_survey.action_unarchive()
self.assertTrue(self.certification_survey.active)
self.assertTrue(self.certification_badge.active)
def test_set_same_badge_on_multiple_survey(self):
self.certification_survey.write({
'certification_give_badge': True,
'certification_badge_id': self.certification_badge.id
})
# set the same badge on another survey should fail:
with mute_logger('odoo.sql_db'):
with self.assertRaises(IntegrityError):
self.certification_survey_2.write({
'certification_give_badge': True,
'certification_badge_id': self.certification_badge.id
})
    def test_badge_configuration(self):
        """ Test badge synchronization: toggling ``certification_give_badge``
        creates/deletes the gamification challenge, challenge line and goal,
        and (de)activates the badge. """
        # add a certification badge on a new survey
        challenge = self.env['gamification.challenge'].search([('reward_id', '=', self.certification_badge.id)])
        self.assertEqual(len(challenge), 0, """A challenge should not exist or be linked to the certification badge
            if the certification badge have not been activated on a certification survey""")
        self.certification_survey.write({
            'certification_give_badge': True,
            'certification_badge_id': self.certification_badge.id
        })
        # activating the badge must create challenge + challenge line + goal
        challenge = self.env['gamification.challenge'].search([('reward_id', '=', self.certification_badge.id)])
        self.assertEqual(len(challenge), 1,
            "A challenge should be created if the certification badge is activated on a certification survey")
        challenge_line = self.env['gamification.challenge.line'].search([('challenge_id', '=', challenge.id)])
        self.assertEqual(len(challenge_line), 1,
            "A challenge_line should be created if the certification badge is activated on a certification survey")
        goal = challenge_line.definition_id
        self.assertEqual(len(goal), 1,
            "A goal should be created if the certification badge is activated on a certification survey")
        # don't give badge anymore
        self.certification_survey.write({'certification_give_badge': False})
        self.assertEqual(self.certification_badge.id, self.certification_survey.certification_badge_id.id,
            'The certification badge should still be set on certification survey even if give_badge is false.')
        self.assertEqual(self.certification_badge.active, False,
            'The certification badge should be inactive if give_badge is false.')
        challenge = self.env['gamification.challenge'].search([('id', '=', challenge.id)])
        self.assertEqual(len(challenge), 0,
            "The challenge should be deleted if the certification badge is unset from the certification survey")
        challenge_line = self.env['gamification.challenge.line'].search([('id', '=', challenge_line.id)])
        self.assertEqual(len(challenge_line), 0,
            "The challenge_line should be deleted if the certification badge is unset from the certification survey")
        goal = self.env['gamification.goal'].search([('id', '=', goal.id)])
        self.assertEqual(len(goal), 0,
            "The goal should be deleted if the certification badge is unset from the certification survey")
        # re active the badge in the survey
        self.certification_survey.write({'certification_give_badge': True})
        self.assertEqual(self.certification_badge.active, True,
            'The certification badge should be active if give_badge is true.')
        challenge = self.env['gamification.challenge'].search([('reward_id', '=', self.certification_badge.id)])
        self.assertEqual(len(challenge), 1,
            "A challenge should be created if the certification badge is activated on a certification survey")
        challenge_line = self.env['gamification.challenge.line'].search([('challenge_id', '=', challenge.id)])
        self.assertEqual(len(challenge_line), 1,
            "A challenge_line should be created if the certification badge is activated on a certification survey")
        goal = challenge_line.definition_id
        self.assertEqual(len(goal), 1,
            "A goal should be created if the certification badge is activated on a certification survey")
        # If 'certification_give_badge' is True but no certification badge is linked, ValueError should be raised
        duplicate_survey = self.certification_survey.copy()
        self.assertFalse(duplicate_survey.certification_give_badge, "Value for field 'certification_give_badge' should not be copied")
        self.assertEqual(duplicate_survey.certification_badge_id, self.env['gamification.badge'], "Badge should be empty")
        with self.assertRaises(ValueError):
            duplicate_survey.write({'certification_give_badge': True})
def test_certification_badge_access(self):
self.certification_badge.with_user(self.survey_manager).write(
{'description': "Spoiler alert: I'm Aegon Targaryen and I sleep with the Dragon Queen, who is my aunt by the way! So I can do whatever I want! Even if I know nothing!"})
self.certification_badge.with_user(self.survey_user).write({'description': "Youpie Yeay!"})
with self.assertRaises(AccessError):
self.certification_badge.with_user(self.user_emp).write({'description': "I'm a dude who think that has every right on the Iron Throne"})
with self.assertRaises(AccessError):
self.certification_badge.with_user(self.user_portal).write({'description': "Guy, you just can't do that !"})
with self.assertRaises(AccessError):
self.certification_badge.with_user(self.user_public).write({'description': "What did you expect ? Schwepps !"})
    def test_badge_configuration_multi(self):
        """ Challenge/goal lifecycle when toggling badges on several surveys
        through a single multi-record write. """
        vals = {
            'title': 'Certification Survey',
            'access_mode': 'public',
            'users_login_required': True,
            'scoring_type': 'scoring_with_answers',
            'certification': True,
            'certification_give_badge': True,
            'certification_badge_id': self.certification_badge.id,
        }
        # three surveys, each with its own badge
        survey_1 = self.env['survey.survey'].create(vals.copy())
        vals.update({'certification_badge_id': self.certification_badge_2.id})
        survey_2 = self.env['survey.survey'].create(vals.copy())
        vals.update({'certification_badge_id': self.certification_badge_3.id})
        survey_3 = self.env['survey.survey'].create(vals)
        certification_surveys = self.env['survey.survey'].browse([survey_1.id, survey_2.id, survey_3.id])
        self.assertEqual(len(certification_surveys), 3, 'There should be 3 certification survey created')
        # each survey/badge pair gets its own challenge, challenge line and goal
        challenges = self.env['gamification.challenge'].search([('reward_id', 'in', certification_surveys.mapped('certification_badge_id').ids)])
        self.assertEqual(len(challenges), 3, "3 challenges should be created")
        challenge_lines = self.env['gamification.challenge.line'].search([('challenge_id', 'in', challenges.ids)])
        self.assertEqual(len(challenge_lines), 3, "3 challenge_lines should be created")
        goals = challenge_lines.mapped('definition_id')
        self.assertEqual(len(goals), 3, "3 goals should be created")
        # Test write multi
        certification_surveys.write({'certification_give_badge': False})
        for survey in certification_surveys:
            self.assertEqual(survey.certification_badge_id.active, False,
                'Every badge should be inactive if the 3 survey does not give badge anymore')
        challenges = self.env['gamification.challenge'].search([('id', 'in', challenges.ids)])
        self.assertEqual(len(challenges), 0, "The 3 challenges should be deleted")
        challenge_lines = self.env['gamification.challenge.line'].search([('id', 'in', challenge_lines.ids)])
        self.assertEqual(len(challenge_lines), 0, "The 3 challenge_lines should be deleted")
        goals = self.env['gamification.goal'].search([('id', 'in', goals.ids)])
        self.assertEqual(len(goals), 0, "The 3 goals should be deleted")
        # re-enabling in batch must recreate everything
        certification_surveys.write({'certification_give_badge': True})
        for survey in certification_surveys:
            self.assertEqual(survey.certification_badge_id.active, True,
                'Every badge should be reactivated if the 3 survey give badges again')
        challenges = self.env['gamification.challenge'].search([('reward_id', 'in', certification_surveys.mapped('certification_badge_id').ids)])
        self.assertEqual(len(challenges), 3, "3 challenges should be created")
        challenge_lines = self.env['gamification.challenge.line'].search([('challenge_id', 'in', challenges.ids)])
        self.assertEqual(len(challenge_lines), 3, "3 challenge_lines should be created")
        goals = challenge_lines.mapped('definition_id')
        self.assertEqual(len(goals), 3, "3 goals should be created")

View file

@ -0,0 +1,215 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from odoo.addons.base.models.ir_mail_server import IrMailServer
from odoo.addons.survey.tests import common
from odoo.tests import tagged
from odoo.tests.common import HttpCase
@tagged('-at_install', 'post_install', 'functional')
class TestCertificationFlow(common.TestSurveyCommon, HttpCase):

    def test_flow_certification(self):
        """ End-to-end certification flow: a survey user builds a scored,
        time-limited certification; an employee answers it through the HTTP
        controllers (including going back to change an answer); score,
        answer correction and certification email are then verified. """
        # Step: survey user creates the certification
        # --------------------------------------------------
        with self.with_user('survey_user'):
            certification = self.env['survey.survey'].create({
                'title': 'User Certification for SO lines',
                'access_mode': 'public',
                'users_login_required': True,
                'questions_layout': 'page_per_question',
                'users_can_go_back': True,
                'scoring_type': 'scoring_with_answers',
                'scoring_success_min': 85.0,
                'certification': True,
                'certification_mail_template_id': self.env.ref('survey.mail_template_certification').id,
                'is_time_limited': True,
                'time_limit': 10,
            })
            q01 = self._add_question(
                None, 'When do you know it\'s the right time to use the SO line model?', 'simple_choice',
                sequence=1,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=certification.id,
                labels=[
                    {'value': 'Please stop'},
                    {'value': 'Only on the SO form'},
                    {'value': 'Only on the Survey form'},
                    {'value': 'Easy, all the time!!!', 'is_correct': True, 'answer_score': 2.0}
                ])
            q02 = self._add_question(
                None, 'On average, how many lines of code do you need when you use SO line widgets?', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=certification.id,
                labels=[
                    {'value': '1'},
                    {'value': '5', 'is_correct': True, 'answer_score': 2.0},
                    {'value': '100'},
                    {'value': '1000'}
                ])
            q03 = self._add_question(
                None, 'What do you think about SO line widgets (not rated)?', 'text_box',
                sequence=3,
                constr_mandatory=True, constr_error_msg='Please tell us what you think', survey_id=certification.id)
            q04 = self._add_question(
                None, 'On a scale of 1 to 10, how much do you like SO line widgets (not rated)?', 'simple_choice',
                sequence=4,
                constr_mandatory=True, constr_error_msg='Please tell us what you think', survey_id=certification.id,
                labels=[
                    {'value': '-1'},
                    {'value': '0'},
                    {'value': '100'}
                ])
            q05 = self._add_question(
                None, 'Select all the correct "types" of SO lines', 'multiple_choice',
                sequence=5,
                constr_mandatory=False, survey_id=certification.id,
                labels=[
                    {'value': 'sale_order', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'survey_page', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'survey_question', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'a_future_and_yet_unknown_model', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'none', 'answer_score': -1.0}
                ])

        # Step: employee takes the certification
        # --------------------------------------------------
        self.authenticate('user_emp', 'user_emp')
        # Employee opens start page
        response = self._access_start(certification)
        self.assertResponse(response, 200, [certification.title, 'Time limit for this certification', '10 minutes'])
        # -> this should have generated a new user_input with a token
        user_inputs = self.env['survey.user_input'].search([('survey_id', '=', certification.id)])
        self.assertEqual(len(user_inputs), 1)
        self.assertEqual(user_inputs.partner_id, self.user_emp.partner_id)
        answer_token = user_inputs.access_token
        # Employee begins survey with first page
        response = self._access_page(certification, answer_token)
        self.assertResponse(response, 200)
        csrf_token = self._find_csrf_token(response.text)
        r = self._access_begin(certification, answer_token)
        self.assertResponse(r, 200)

        # patch the outgoing mail server so no real email is sent on submission
        with patch.object(IrMailServer, 'connect'):
            self._answer_question(q01, q01.suggested_answer_ids.ids[3], answer_token, csrf_token)
            self._answer_question(q02, q02.suggested_answer_ids.ids[1], answer_token, csrf_token)
            self._answer_question(q03, "I think they're great!", answer_token, csrf_token)
            # go back one question (users_can_go_back) and change the q03 answer
            self._answer_question(q04, q04.suggested_answer_ids.ids[0], answer_token, csrf_token, button_submit='previous')
            self._answer_question(q03, "Just kidding, I don't like it...", answer_token, csrf_token)
            self._answer_question(q04, q04.suggested_answer_ids.ids[0], answer_token, csrf_token)
            self._answer_question(q05, [q05.suggested_answer_ids.ids[0], q05.suggested_answer_ids.ids[1], q05.suggested_answer_ids.ids[3]], answer_token, csrf_token)

        user_inputs.invalidate_recordset()
        # Check that certification is successfully passed
        self.assertEqual(user_inputs.scoring_percentage, 87.5)
        self.assertTrue(user_inputs.scoring_success)
        # Check that the certification is still successful even if scoring_success_min of certification is modified
        certification.write({'scoring_success_min': 90})
        self.assertTrue(user_inputs.scoring_success)
        # Check answer correction is taken into account
        self.assertNotIn("I think they're great!", user_inputs.mapped('user_input_line_ids.value_text_box'))
        self.assertIn("Just kidding, I don't like it...", user_inputs.mapped('user_input_line_ids.value_text_box'))
        certification_email = self.env['mail.mail'].sudo().search([], limit=1, order="create_date desc")
        # Check certification email correctly sent and contains document
        self.assertIn("User Certification for SO lines", certification_email.subject)
        self.assertIn("employee@example.com", certification_email.email_to)
        self.assertEqual(len(certification_email.attachment_ids), 1)
        self.assertEqual(certification_email.attachment_ids[0].name, 'Certification Document.html')
    def test_randomized_certification(self):
        """ Randomized certification: one question drawn per section, the
        correct answer submitted, per-section statistics verified. """
        # Step: survey user creates the randomized certification
        # --------------------------------------------------
        with self.with_user('survey_user'):
            certification = self.env['survey.survey'].create({
                'title': 'User randomized Certification',
                'questions_layout': 'page_per_section',
                'questions_selection': 'random',
                'scoring_type': 'scoring_without_answers',
            })
            # one section from which a single question is drawn at random
            page1 = self._add_question(
                None, 'Page 1', None,
                sequence=1,
                survey_id=certification.id,
                is_page=True,
                random_questions_count=1,
            )
            # both candidate questions have their correct answer first
            q101 = self._add_question(
                None, 'What is the answer to the first question?', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=certification.id,
                labels=[
                    {'value': 'The correct answer', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'The wrong answer'},
                ])
            q102 = self._add_question(
                None, 'What is the answer to the second question?', 'simple_choice',
                sequence=3,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=certification.id,
                labels=[
                    {'value': 'The correct answer', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'The wrong answer'},
                ])

        # Step: employee takes the randomized certification
        # --------------------------------------------------
        self.authenticate('user_emp', 'user_emp')
        # Employee opens start page
        response = self._access_start(certification)
        # -> this should have generated a new user_input with a token
        user_inputs = self.env['survey.user_input'].search([('survey_id', '=', certification.id)])
        self.assertEqual(len(user_inputs), 1)
        self.assertEqual(user_inputs.partner_id, self.user_emp.partner_id)
        answer_token = user_inputs.access_token
        # Employee begins survey with first page
        response = self._access_page(certification, answer_token)
        self.assertResponse(response, 200)
        csrf_token = self._find_csrf_token(response.text)
        r = self._access_begin(certification, answer_token)
        self.assertResponse(r, 200)

        # patch the outgoing mail server so no real email is sent on submission
        with patch.object(IrMailServer, 'connect'):
            question_ids = user_inputs.predefined_question_ids
            self.assertEqual(len(question_ids), 1, 'Only one question should have been selected by the randomization')
            # Whatever which question was selected, the correct answer is the first one
            self._answer_question(question_ids, question_ids.suggested_answer_ids.ids[0], answer_token, csrf_token)

        statistics = user_inputs._prepare_statistics()[user_inputs]
        total_statistics = statistics['totals']
        self.assertEqual(total_statistics, [
            {'text': 'Correct', 'count': 1},
            {'text': 'Partially', 'count': 0},
            {'text': 'Incorrect', 'count': 0},
            {'text': 'Unanswered', 'count': 0},
        ], "With the configured randomization, there should be exactly 1 correctly answered question and none skipped.")
        section_statistics = statistics['by_section']
        self.assertEqual(section_statistics, {
            'Page 1': {
                'question_count': 1,
                'correct': 1,
                'partial': 0,
                'incorrect': 0,
                'skipped': 0,
            }
        }, "With the configured randomization, there should be exactly 1 correctly answered question in the 'Page 1' section.")

View file

@ -0,0 +1,509 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from freezegun import freeze_time
from odoo import _, Command, fields
from odoo.addons.survey.tests import common
from odoo.tests.common import users
class TestSurveyInternals(common.TestSurveyCommon):

    def test_answer_attempts_count(self):
        """ As 'attempts_number' and 'attempts_count' are computed using raw SQL queries, let us
        test the results. """
        test_survey = self.env['survey.survey'].create({
            'title': 'Test Survey',
            'is_attempts_limited': True,
            'attempts_limit': 4,
        })
        # four completed attempts by the same partner on the same survey
        all_attempts = self.env['survey.user_input']
        for _i in range(4):
            all_attempts |= self._add_answer(test_survey, self.survey_user.partner_id, state='done')
        # read both fields at once to allow computing their values in batch
        attempts_results = all_attempts.read(['attempts_number', 'attempts_count'])
        first_attempt = attempts_results[0]
        second_attempt = attempts_results[1]
        third_attempt = attempts_results[2]
        fourth_attempt = attempts_results[3]
        # attempts_number follows creation order; attempts_count is the total
        self.assertEqual(first_attempt['attempts_number'], 1)
        self.assertEqual(first_attempt['attempts_count'], 4)
        self.assertEqual(second_attempt['attempts_number'], 2)
        self.assertEqual(second_attempt['attempts_count'], 4)
        self.assertEqual(third_attempt['attempts_number'], 3)
        self.assertEqual(third_attempt['attempts_count'], 4)
        self.assertEqual(fourth_attempt['attempts_number'], 4)
        self.assertEqual(fourth_attempt['attempts_count'], 4)
@freeze_time("2020-02-15 18:00")
def test_answer_display_name(self):
""" The "display_name" field in a survey.user_input.line is a computed field that will
display the answer label for any type of question.
Let us test the various question types. """
questions = self._create_one_question_per_type()
user_input = self._add_answer(self.survey, self.survey_user.partner_id)
for question in questions:
if question.question_type == 'char_box':
question_answer = self._add_answer_line(question, user_input, 'Char box answer')
self.assertEqual(question_answer.display_name, 'Char box answer')
elif question.question_type == 'text_box':
question_answer = self._add_answer_line(question, user_input, 'Text box answer')
self.assertEqual(question_answer.display_name, 'Text box answer')
elif question.question_type == 'numerical_box':
question_answer = self._add_answer_line(question, user_input, 7)
self.assertEqual(question_answer.display_name, '7.0')
elif question.question_type == 'date':
question_answer = self._add_answer_line(question, user_input, fields.Datetime.now())
self.assertEqual(question_answer.display_name, '2020-02-15')
elif question.question_type == 'datetime':
question_answer = self._add_answer_line(question, user_input, fields.Datetime.now())
self.assertEqual(question_answer.display_name, '2020-02-15 18:00:00')
elif question.question_type == 'simple_choice':
question_answer = self._add_answer_line(question, user_input, question.suggested_answer_ids[0].id)
self.assertEqual(question_answer.display_name, 'SChoice0')
elif question.question_type == 'multiple_choice':
question_answer_1 = self._add_answer_line(question, user_input, question.suggested_answer_ids[0].id)
self.assertEqual(question_answer_1.display_name, 'MChoice0')
question_answer_2 = self._add_answer_line(question, user_input, question.suggested_answer_ids[1].id)
self.assertEqual(question_answer_2.display_name, 'MChoice1')
elif question.question_type == 'matrix':
question_answer_1 = self._add_answer_line(question, user_input,
question.suggested_answer_ids[0].id, **{'answer_value_row': question.matrix_row_ids[0].id})
self.assertEqual(question_answer_1.display_name, 'Column0: Row0')
question_answer_2 = self._add_answer_line(question, user_input,
question.suggested_answer_ids[0].id, **{'answer_value_row': question.matrix_row_ids[1].id})
self.assertEqual(question_answer_2.display_name, 'Column0: Row1')
@users('survey_manager')
def test_answer_validation_mandatory(self):
""" For each type of question check that mandatory questions correctly check for complete answers """
for question in self._create_one_question_per_type():
self.assertDictEqual(
question.validate_question(''),
{question.id: 'TestError'}
)
@users('survey_manager')
def test_answer_validation_date(self):
question = self._add_question(
self.page_0, 'Q0', 'date', validation_required=True,
validation_min_date='2015-03-20', validation_max_date='2015-03-25', validation_error_msg='ValidationError')
self.assertEqual(
question.validate_question('Is Alfred an answer ?'),
{question.id: _('This is not a date')}
)
self.assertEqual(
question.validate_question('2015-03-19'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('2015-03-26'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('2015-03-25'),
{}
)
@users('survey_manager')
def test_answer_validation_numerical(self):
question = self._add_question(
self.page_0, 'Q0', 'numerical_box', validation_required=True,
validation_min_float_value=2.2, validation_max_float_value=3.3, validation_error_msg='ValidationError')
self.assertEqual(
question.validate_question('Is Alfred an answer ?'),
{question.id: _('This is not a number')}
)
self.assertEqual(
question.validate_question('2.0'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('4.0'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('2.9'),
{}
)
@users('survey_manager')
def test_answer_validation_char_box_email(self):
question = self._add_question(self.page_0, 'Q0', 'char_box', validation_email=True)
self.assertEqual(
question.validate_question('not an email'),
{question.id: _('This answer must be an email address')}
)
self.assertEqual(
question.validate_question('email@example.com'),
{}
)
@users('survey_manager')
def test_answer_validation_char_box_length(self):
question = self._add_question(
self.page_0, 'Q0', 'char_box', validation_required=True,
validation_length_min=2, validation_length_max=8, validation_error_msg='ValidationError')
self.assertEqual(
question.validate_question('l'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('waytoomuchlonganswer'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('valid'),
{}
)
def test_partial_scores_simple_choice(self):
"""" Check that if partial scores are given for partially correct answers, in the case of a multiple
choice question with single choice, choosing the answer with max score gives 100% of points. """
partial_scores_survey = self.env['survey.survey'].create({
'title': 'How much do you know about words?',
'scoring_type': 'scoring_with_answers',
'scoring_success_min': 90.0,
})
[a_01, a_02, a_03] = self.env['survey.question.answer'].create([{
'value': 'A thing full of letters.',
'answer_score': 1.0
}, {
'value': 'A unit of language, [...], carrying a meaning.',
'answer_score': 4.0,
'is_correct': True
}, {
'value': '42',
'answer_score': -4.0
}])
q_01 = self.env['survey.question'].create({
'survey_id': partial_scores_survey.id,
'title': 'What is a word?',
'sequence': 1,
'question_type': 'simple_choice',
'suggested_answer_ids': [(6, 0, (a_01 | a_02 | a_03).ids)]
})
user_input = self.env['survey.user_input'].create({'survey_id': partial_scores_survey.id})
self.env['survey.user_input.line'].create({
'user_input_id': user_input.id,
'question_id': q_01.id,
'answer_type': 'suggestion',
'suggested_answer_id': a_02.id
})
# Check that scoring is correct and survey is passed
self.assertEqual(user_input.scoring_percentage, 100)
self.assertTrue(user_input.scoring_success)
@users('survey_manager')
def test_skipped_values(self):
""" Create one question per type of questions.
Make sure they are correctly registered as 'skipped' after saving an empty answer for each
of them. """
questions = self._create_one_question_per_type()
survey_user = self.survey._create_answer(user=self.survey_user)
for question in questions:
answer = '' if question.question_type in ['char_box', 'text_box'] else None
survey_user.save_lines(question, answer)
for question in questions:
self._assert_skipped_question(question, survey_user)
@users('survey_manager')
def test_copy_conditional_question_settings(self):
""" Create a survey with conditional layout, clone it and verify that the cloned survey has the same conditional
layout as the original survey.
The test also check that the cloned survey doesn't reference the original survey.
"""
def get_question_by_title(survey, title):
return survey.question_ids.filtered(lambda q: q.title == title)[0]
# Create the survey questions (! texts of the questions must be unique as they are used to query them)
q_is_vegetarian_text = 'Are you vegetarian ?'
q_is_vegetarian = self._add_question(
self.page_0, q_is_vegetarian_text, 'multiple_choice', survey_id=self.survey.id,
sequence=100, labels=[{'value': 'Yes'}, {'value': 'No'}])
q_food_vegetarian_text = 'Choose your green meal'
self._add_question(self.page_0, q_food_vegetarian_text, 'multiple_choice',
is_conditional=True, sequence=101,
triggering_question_id=q_is_vegetarian.id,
triggering_answer_id=q_is_vegetarian.suggested_answer_ids[0].id,
survey_id=self.survey.id,
labels=[{'value': 'Vegetarian pizza'}, {'value': 'Vegetarian burger'}])
q_food_not_vegetarian_text = 'Choose your meal'
self._add_question(self.page_0, q_food_not_vegetarian_text, 'multiple_choice',
is_conditional=True, sequence=102,
triggering_question_id=q_is_vegetarian.id,
triggering_answer_id=q_is_vegetarian.suggested_answer_ids[1].id,
survey_id=self.survey.id,
labels=[{'value': 'Steak with french fries'}, {'value': 'Fish'}])
# Clone the survey
survey_clone = self.survey.copy()
# Verify the conditional layout and that the cloned survey doesn't reference the original survey
q_is_vegetarian_cloned = get_question_by_title(survey_clone, q_is_vegetarian_text)
q_food_vegetarian_cloned = get_question_by_title(survey_clone, q_food_vegetarian_text)
q_food_not_vegetarian_cloned = get_question_by_title(survey_clone, q_food_not_vegetarian_text)
self.assertFalse(q_is_vegetarian_cloned.is_conditional)
# Vegetarian choice
self.assertTrue(q_food_vegetarian_cloned)
# Correct conditional layout
self.assertEqual(q_food_vegetarian_cloned.triggering_question_id.id, q_is_vegetarian_cloned.id)
self.assertEqual(q_food_vegetarian_cloned.triggering_answer_id.id,
q_is_vegetarian_cloned.suggested_answer_ids[0].id)
# Doesn't reference the original survey
self.assertNotEqual(q_food_vegetarian_cloned.triggering_question_id.id, q_is_vegetarian.id)
self.assertNotEqual(q_food_vegetarian_cloned.triggering_answer_id.id,
q_is_vegetarian.suggested_answer_ids[0].id)
# Not vegetarian choice
self.assertTrue(q_food_not_vegetarian_cloned.is_conditional)
# Correct conditional layout
self.assertEqual(q_food_not_vegetarian_cloned.triggering_question_id.id, q_is_vegetarian_cloned.id)
self.assertEqual(q_food_not_vegetarian_cloned.triggering_answer_id.id,
q_is_vegetarian_cloned.suggested_answer_ids[1].id)
# Doesn't reference the original survey
self.assertNotEqual(q_food_not_vegetarian_cloned.triggering_question_id.id, q_is_vegetarian.id)
self.assertNotEqual(q_food_not_vegetarian_cloned.triggering_answer_id.id,
q_is_vegetarian.suggested_answer_ids[1].id)
@users('survey_manager')
def test_copy_conditional_question_with_sequence_changed(self):
""" Create a survey with two questions, change the sequence of the questions,
set the second question as conditional on the first one, and check that the conditional
question is still conditional on the first one after copying the survey."""
def get_question_by_title(survey, title):
return survey.question_ids.filtered(lambda q: q.title == title)[0]
# Create the survey questions
q_1 = self._add_question(
self.page_0, 'Q1', 'multiple_choice', survey_id=self.survey.id,
sequence=200, labels=[{'value': 'Yes'}, {'value': 'No'}])
q_2 = self._add_question(
self.page_0, 'Q2', 'multiple_choice', survey_id=self.survey.id,
sequence=300, labels=[{'value': 'Yes'}, {'value': 'No'}])
# Change the sequence of the second question to be before the first one
q_2.write({'sequence': 100})
# Set a conditional question on the first question
q_1.write({
'is_conditional': True,
'triggering_question_id': q_2.id,
'triggering_answer_id': q_2.suggested_answer_ids[0].id,
})
(q_1 | q_2).invalidate_recordset()
# Clone the survey
cloned_survey = self.survey.copy()
# Check that the sequence of the questions are the same as the original survey
self.assertEqual(get_question_by_title(cloned_survey, 'Q1').sequence, q_1.sequence)
self.assertEqual(get_question_by_title(cloned_survey, 'Q2').sequence, q_2.sequence)
# Check that the conditional question is correctly copied to the right question
self.assertEqual(get_question_by_title(cloned_survey, 'Q1').triggering_question_id.title, q_1.triggering_question_id.title)
self.assertFalse(get_question_by_title(cloned_survey, 'Q2').triggering_question_id)
def test_get_pages_and_questions_to_show(self):
"""
Tests the method `_get_pages_and_questions_to_show` - it takes a recordset of
question.question from a survey.survey and returns a recordset without
invalid conditional questions and pages without description
Structure of the test survey:
sequence | type | trigger | validity
----------------------------------------------------------------------
1 | page, no description | / | X
2 | text_box | trigger is 6 | X
3 | numerical_box | trigger is 2 | X
4 | simple_choice | / | V
5 | page, description | / | V
6 | multiple_choice | / | V
7 | multiple_choice, no answers | / | V
8 | text_box | trigger is 6 | V
9 | matrix | trigger is 5 | X
10 | simple_choice | trigger is 7 | X
11 | simple_choice, no answers | trigger is 8 | X
12 | text_box | trigger is 11 | X
"""
my_survey = self.env['survey.survey'].create({
'title': 'my_survey',
'questions_layout': 'page_per_question',
'questions_selection': 'all',
'access_mode': 'public',
})
[
page_without_description,
text_box_1,
numerical_box,
_simple_choice_1,
page_with_description,
multiple_choice_1,
multiple_choice_2,
text_box_2,
matrix,
simple_choice_2,
simple_choice_3,
text_box_3,
] = self.env['survey.question'].create([{
'title': 'no desc',
'survey_id': my_survey.id,
'sequence': 1,
'question_type': False,
'is_page': True,
'description': False,
}, {
'title': 'text_box with invalid trigger',
'survey_id': my_survey.id,
'sequence': 2,
'is_page': False,
'question_type': 'simple_choice',
}, {
'title': 'numerical box with trigger that is invalid',
'survey_id': my_survey.id,
'sequence': 3,
'is_page': False,
'question_type': 'numerical_box',
}, {
'title': 'valid simple_choice',
'survey_id': my_survey.id,
'sequence': 4,
'is_page': False,
'question_type': 'simple_choice',
'suggested_answer_ids': [(0, 0, {'value': 'a'})],
}, {
'title': 'with desc',
'survey_id': my_survey.id,
'sequence': 5,
'is_page': True,
'question_type': False,
'description': 'This page has a description',
}, {
'title': 'multiple choice not conditional',
'survey_id': my_survey.id,
'sequence': 6,
'is_page': False,
'question_type': 'multiple_choice',
'suggested_answer_ids': [(0, 0, {'value': 'a'})]
}, {
'title': 'multiple_choice with no answers',
'survey_id': my_survey.id,
'sequence': 7,
'is_page': False,
'question_type': 'multiple_choice',
}, {
'title': 'text_box with valid trigger',
'survey_id': my_survey.id,
'sequence': 8,
'is_page': False,
'question_type': 'text_box',
}, {
'title': 'matrix with invalid trigger (page)',
'survey_id': my_survey.id,
'sequence': 9,
'is_page': False,
'question_type': 'matrix',
}, {
'title': 'simple choice w/ invalid trigger (no suggested_answer_ids)',
'survey_id': my_survey.id,
'sequence': 10,
'is_page': False,
'question_type': 'simple_choice',
}, {
'title': 'text_box w/ invalid trigger (not a mcq)',
'survey_id': my_survey.id,
'sequence': 11,
'is_page': False,
'question_type': 'simple_choice',
'suggested_answer_ids': False,
}, {
'title': 'text_box w/ invalid trigger (suggested_answer_ids is False)',
'survey_id': my_survey.id,
'sequence': 12,
'is_page': False,
'question_type': 'text_box',
}])
text_box_1.write({'is_conditional': True, 'triggering_question_id': multiple_choice_1.id})
numerical_box.write({'is_conditional': True, 'triggering_question_id': text_box_1.id})
text_box_2.write({'is_conditional': True, 'triggering_question_id': multiple_choice_1.id})
matrix.write({'is_conditional': True, 'triggering_question_id': page_with_description.id})
simple_choice_2.write({'is_conditional': True, 'triggering_question_id': multiple_choice_2.id})
simple_choice_3.write({'is_conditional': True, 'triggering_question_id': text_box_2.id})
text_box_3.write({'is_conditional': True, 'triggering_question_id': simple_choice_3.id})
invalid_records = page_without_description + text_box_1 + numerical_box \
+ matrix + simple_choice_2 + simple_choice_3 + text_box_3
question_and_page_ids = my_survey.question_and_page_ids
returned_questions_and_pages = my_survey._get_pages_and_questions_to_show()
self.assertEqual(question_and_page_ids - invalid_records, returned_questions_and_pages)
def test_survey_session_leaderboard(self):
"""Check leaderboard rendering with small (max) scores values."""
start_time = fields.datetime(2023, 7, 7, 12, 0, 0)
test_survey = self.env['survey.survey'].create({
'title': 'Test This Survey',
'scoring_type': 'scoring_with_answers',
'session_question_start_time': start_time,
'session_start_time': start_time,
'session_state': 'in_progress',
'question_and_page_ids': [
Command.create({
'question_type': 'simple_choice',
'suggested_answer_ids': [
Command.create({'value': 'In Asia', 'answer_score': 0.125, 'is_correct': True}),
Command.create({'value': 'In Europe', 'answer_score': 0., 'is_correct': False}),
],
'title': 'Where is india?',
}),
]
})
question_1 = test_survey.question_and_page_ids[0]
answer_correct = question_1.suggested_answer_ids[0]
user_input = self.env['survey.user_input'].create({'survey_id': test_survey.id, 'is_session_answer': True})
user_input_line = self.env['survey.user_input.line'].create({
'user_input_id': user_input.id,
'question_id': question_1.id,
'answer_type': 'suggestion',
'suggested_answer_id': answer_correct.id,
})
self.assertEqual(user_input_line.answer_score, 0.125)
self.env['ir.qweb']._render('survey.user_input_session_leaderboard', {
'animate': True,
'leaderboard': test_survey._prepare_leaderboard_values()
})

View file

@ -0,0 +1,70 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.survey.tests import common
class TestSurveyComputePagesQuestions(common.TestSurveyCommon):

    def test_compute_pages_questions(self):
        """ Check that the page/question relations (survey.page_ids,
        page.question_ids and question.page_id) are correctly computed from
        the question sequences, including after a question is re-sequenced
        onto another page. """
        with self.with_user('survey_manager'):
            survey = self.env['survey.survey'].create({
                'title': 'Test compute survey',
            })
            page_0 = self.env['survey.question'].create({
                'is_page': True,
                'question_type': False,
                'sequence': 1,
                'title': 'P1',
                'survey_id': survey.id
            })
            # five text questions Q1..Q5 right after the first page marker
            page_0_questions = [
                self._add_question(page_0, 'Q%s' % number, 'text_box', survey_id=survey.id)
                for number in range(1, 6)
            ]
            page_1 = self.env['survey.question'].create({
                'is_page': True,
                'question_type': False,
                'sequence': 7,
                'title': 'P2',
                'survey_id': survey.id,
            })
            # four text questions Q6..Q9 after the second page marker
            page_1_questions = [
                self._add_question(page_1, 'Q%s' % number, 'text_box', survey_id=survey.id)
                for number in range(6, 10)
            ]

            # both page markers must be picked up by survey.page_ids
            self.assertEqual(len(survey.page_ids), 2, "Survey should have 2 pages")
            self.assertIn(page_0, survey.page_ids, "Page 1 should be contained in survey's page_ids")
            self.assertIn(page_1, survey.page_ids, "Page 2 should be contained in survey's page_ids")

            # questions are attached to the page marker preceding them
            self.assertEqual(len(page_0.question_ids), 5, "Page 1 should have 5 questions")
            for number, question in enumerate(page_0_questions, start=1):
                self.assertIn(question, page_0.question_ids, "Question %s should be in page 1" % number)

            self.assertEqual(len(page_1.question_ids), 4, "Page 2 should have 4 questions")
            for number, question in enumerate(page_1_questions, start=6):
                self.assertIn(question, page_1.question_ids, "Question %s should be in page 2" % number)

            # the inverse relation (question.page_id) must agree
            for number, question in enumerate(page_0_questions, start=1):
                self.assertEqual(question.page_id, page_0, "Question %s should belong to page 1" % number)
            for number, question in enumerate(page_1_questions, start=6):
                self.assertEqual(question.page_id, page_1, "Question %s should belong to page 2" % number)

            # move 1 question from page 1 to page 2
            moved_question = page_0_questions[2]
            moved_question.write({'sequence': 12})
            moved_question._compute_page_id()
            self.assertEqual(moved_question.page_id, page_1, "Question 3 should now belong to page 2")

View file

@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.survey.tests import common
from odoo.tests import tagged
from odoo.tests.common import HttpCase
@tagged('-at_install', 'post_install', 'functional')
class TestSurveyFlow(common.TestSurveyCommon, HttpCase):
    """ End-to-end HTTP test of a public survey: start, answer both pages,
    and verify the generated survey.user_input / user_input.line records. """

    def _format_submission_data(self, page, answer_data, additional_post_data):
        """ Build the POST payload for a page submission.

        :param page: survey.question page record being submitted
        :param dict answer_data: {question_id: {'value': [...]}} mapping
        :param dict additional_post_data: extra form fields (csrf token, answer
            token, submit button name, ...)
        :return dict: payload ready for self._access_submit()
        """
        post_data = {}
        post_data['page_id'] = page.id
        for question_id, answer_vals in answer_data.items():
            question = page.question_ids.filtered(lambda q: q.id == question_id)
            post_data.update(self._prepare_post_data(question, answer_vals['value'], post_data))
        post_data.update(**additional_post_data)
        return post_data

    def test_flow_public(self):
        """ Full public flow: manager creates a 2-page survey, an anonymous
        customer starts it, submits each page, and the answer ends up 'done'. """
        # Step: survey manager creates the survey
        # --------------------------------------------------
        with self.with_user('survey_manager'):
            survey = self.env['survey.survey'].create({
                'title': 'Public Survey for Tarte Al Djotte',
                'access_mode': 'public',
                'users_login_required': False,
                'questions_layout': 'page_per_section',
            })

            # First page is about customer data
            page_0 = self.env['survey.question'].create({
                'is_page': True,
                'question_type': False,
                'sequence': 1,
                'title': 'Page1: Your Data',
                'survey_id': survey.id,
            })
            page0_q0 = self._add_question(
                page_0, 'What is your name', 'text_box',
                comments_allowed=False,
                constr_mandatory=True, constr_error_msg='Please enter your name', survey_id=survey.id)
            page0_q1 = self._add_question(
                page_0, 'What is your age', 'numerical_box',
                comments_allowed=False,
                constr_mandatory=True, constr_error_msg='Please enter your name', survey_id=survey.id)

            # Second page is about tarte al djotte
            page_1 = self.env['survey.question'].create({
                'is_page': True,
                'question_type': False,
                'sequence': 4,
                'title': 'Page2: Tarte Al Djotte',
                'survey_id': survey.id,
            })
            page1_q0 = self._add_question(
                page_1, 'What do you like most in our tarte al djotte', 'multiple_choice',
                labels=[{'value': 'The gras'},
                        {'value': 'The bette'},
                        {'value': 'The tout'},
                        {'value': 'The regime is fucked up'}], survey_id=survey.id)

        # fetch starting data to check only newly created data during this flow
        answers = self.env['survey.user_input'].search([('survey_id', '=', survey.id)])
        answer_lines = self.env['survey.user_input.line'].search([('survey_id', '=', survey.id)])
        self.assertEqual(answers, self.env['survey.user_input'])
        self.assertEqual(answer_lines, self.env['survey.user_input.line'])

        # Step: customer takes the survey
        # --------------------------------------------------

        # Customer opens start page
        r = self._access_start(survey)
        self.assertResponse(r, 200, [survey.title])

        # -> this should have generated a new answer with a token
        answers = self.env['survey.user_input'].search([('survey_id', '=', survey.id)])
        self.assertEqual(len(answers), 1)
        answer_token = answers.access_token
        self.assertTrue(answer_token)
        self.assertAnswer(answers, 'new', self.env['survey.question'])

        # Customer begins survey with first page
        r = self._access_page(survey, answer_token)
        self.assertResponse(r, 200)
        self.assertAnswer(answers, 'new', self.env['survey.question'])
        # the CSRF token from the rendered page is required for the POSTs below
        csrf_token = self._find_csrf_token(r.text)

        r = self._access_begin(survey, answer_token)
        self.assertResponse(r, 200)

        # Customer submit first page answers
        answer_data = {
            page0_q0.id: {'value': ['Alfred Poilvache']},
            page0_q1.id: {'value': ['44.0']},
        }
        post_data = self._format_submission_data(page_0, answer_data, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': 'next'})
        r = self._access_submit(survey, answer_token, post_data)
        self.assertResponse(r, 200)
        answers.invalidate_recordset()  # TDE note: necessary as lots of sudo in controllers messing with cache

        # -> this should have generated answer lines
        self.assertAnswer(answers, 'in_progress', page_0)
        self.assertAnswerLines(page_0, answers, answer_data)

        # Customer is redirected on second page and begins filling it
        r = self._access_page(survey, answer_token)
        self.assertResponse(r, 200)
        # each rendered page carries a fresh CSRF token
        csrf_token = self._find_csrf_token(r.text)

        # Customer submit second page answers
        answer_data = {
            page1_q0.id: {'value': [page1_q0.suggested_answer_ids.ids[0], page1_q0.suggested_answer_ids.ids[1]]},
        }
        post_data = self._format_submission_data(page_1, answer_data, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': 'next'})
        r = self._access_submit(survey, answer_token, post_data)
        self.assertResponse(r, 200)
        answers.invalidate_recordset()  # TDE note: necessary as lots of sudo in controllers messing with cache

        # -> this should have generated answer lines and closed the answer
        self.assertAnswer(answers, 'done', page_1)
        self.assertAnswerLines(page_1, answers, answer_data)

View file

@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.survey.tests import common
from odoo.tests import tagged
from odoo.tests.common import HttpCase
@tagged('-at_install', 'post_install', 'functional')
class TestSurveyFlowWithConditions(common.TestSurveyCommon, HttpCase):
    """ End-to-end HTTP test of a scored survey with conditional questions. """

    def test_conditional_flow_with_scoring(self):
        """ Six questions: q02/q04/q06 are only shown when the correct answer
        of q01/q03/q05 (respectively) is selected. Answering q03 wrong hides
        q04, so the score denominator only counts the five visible questions. """
        with self.with_user('survey_user'):
            survey = self.env['survey.survey'].create({
                'title': 'Survey',
                'access_mode': 'public',
                'questions_layout': 'page_per_section',
                'scoring_type': 'scoring_with_answers',
                'scoring_success_min': 85.0,
            })
            # page must be created by a survey manager (survey_user lacks the right)
            page_0 = self.env['survey.question'].with_user(self.survey_manager).create({
                'title': 'First page',
                'survey_id': survey.id,
                'sequence': 1,
                'is_page': True,
                'question_type': False,
            })
            q01 = self._add_question(
                page_0, 'Question 1', 'simple_choice',
                sequence=1,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2'},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4', 'is_correct': True, 'answer_score': 1.0}
                ])
            # q02 is only triggered by q01's correct answer
            q02 = self._add_question(
                page_0, 'Question 2', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                is_conditional=True, triggering_question_id=q01.id, triggering_answer_id=q01.suggested_answer_ids.filtered(lambda q: q.is_correct).id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4'}
                ])
            q03 = self._add_question(
                page_0, 'Question 3', 'simple_choice',
                sequence=1,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2'},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4', 'is_correct': True, 'answer_score': 1.0}
                ])
            # q04 stays hidden in this flow: q03 is answered incorrectly below
            self._add_question(  # q04
                page_0, 'Question 4', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                is_conditional=True, triggering_question_id=q03.id, triggering_answer_id=q03.suggested_answer_ids.filtered(lambda q: q.is_correct).id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4'}
                ])
            q05 = self._add_question(
                page_0, 'Question 5', 'simple_choice',
                sequence=1,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2'},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4', 'is_correct': True, 'answer_score': 1.0}
                ])
            # q06 is triggered by q05's correct answer (and answered wrong below)
            q06 = self._add_question(
                page_0, 'Question 6', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                is_conditional=True, triggering_question_id=q05.id, triggering_answer_id=q05.suggested_answer_ids.filtered(lambda q: q.is_correct).id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4'}
                ])

        # User opens start page
        self._access_start(survey)

        # -> this should have generated a new user_input with a token
        user_inputs = self.env['survey.user_input'].search([('survey_id', '=', survey.id)])
        self.assertEqual(len(user_inputs), 1)
        answer_token = user_inputs.access_token

        # User begins survey with first page
        response = self._access_page(survey, answer_token)
        self.assertResponse(response, 200)
        # CSRF token from the rendered page is needed for the submit POST
        csrf_token = self._find_csrf_token(response.text)

        r = self._access_begin(survey, answer_token)
        self.assertResponse(r, 200)

        answers = {
            q01: q01.suggested_answer_ids[3],  # Right
            q02: q02.suggested_answer_ids[1],  # Right
            q03: q03.suggested_answer_ids[0],  # Wrong
            q05: q05.suggested_answer_ids[3],  # Right
            q06: q06.suggested_answer_ids[2],  # Wrong
        }
        self._answer_page(page_0, answers, answer_token, csrf_token)
        # invalidate cache: controllers wrote the score through sudo'ed envs
        user_inputs.invalidate_recordset()

        self.assertEqual(round(user_inputs.scoring_percentage), 60, "Three right answers out of five (the fourth one is still hidden)")
        # 60% < scoring_success_min (85%) -> the attempt is not a success
        self.assertFalse(user_inputs.scoring_success)

View file

@ -0,0 +1,308 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from dateutil.relativedelta import relativedelta
from lxml import etree
from odoo import fields, Command
from odoo.addons.survey.tests import common
from odoo.addons.test_mail.tests.common import MailCommon
from odoo.exceptions import UserError
from odoo.tests import Form
from odoo.tests.common import users
class TestSurveyInvite(common.TestSurveyCommon, MailCommon):
def setUp(self):
res = super(TestSurveyInvite, self).setUp()
# by default signup not allowed
self.env["ir.config_parameter"].set_param('auth_signup.invitation_scope', 'b2b')
view = self.env.ref('survey.survey_invite_view_form').sudo()
tree = etree.fromstring(view.arch)
# Remove the invisible on `emails` to be able to test the onchange `_onchange_emails`
# which raises an error when attempting to change `emails`
# while the survey is set with `users_login_required` to True
# By default, `<field name="emails"/>` is invisible when `survey_users_login_required` is True,
# making it normally impossible to change by the user in the web client by default.
# For tests `test_survey_invite_authentication_nosignup` and `test_survey_invite_token_internal`
tree.xpath('//field[@name="emails"]')[0].attrib.pop('attrs')
view.arch = etree.tostring(tree)
return res
@users('survey_manager')
def test_survey_invite_action(self):
# Check correctly configured survey returns an invite wizard action
action = self.survey.action_send_survey()
self.assertEqual(action['res_model'], 'survey.invite')
# Bad cases
surveys = [
# no page
self.env['survey.survey'].create({'title': 'Test survey'}),
# no questions
self.env['survey.survey'].create({'title': 'Test survey', 'question_and_page_ids': [(0, 0, {'is_page': True, 'question_type': False, 'title': 'P0', 'sequence': 1})]}),
# closed
self.env['survey.survey'].with_user(self.survey_manager).create({
'title': 'S0',
'active': False,
'question_and_page_ids': [
(0, 0, {'is_page': True, 'question_type': False, 'title': 'P0', 'sequence': 1}),
(0, 0, {'title': 'Q0', 'sequence': 2, 'question_type': 'text_box'})
]
})
]
for survey in surveys:
with self.assertRaises(UserError):
survey.action_send_survey()
@users('survey_manager')
def test_survey_invite(self):
Answer = self.env['survey.user_input']
deadline = fields.Datetime.now() + relativedelta(months=1)
self.survey.write({'access_mode': 'public', 'users_login_required': False})
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
# some lowlevel checks that action is correctly configured
self.assertEqual(Answer.search([('survey_id', '=', self.survey.id)]), self.env['survey.user_input'])
self.assertEqual(invite_form.survey_id, self.survey)
invite_form.partner_ids.add(self.customer)
invite_form.deadline = fields.Datetime.to_string(deadline)
invite = invite_form.save()
invite.action_invite()
answers = Answer.search([('survey_id', '=', self.survey.id)])
self.assertEqual(len(answers), 1)
self.assertEqual(
set(answers.mapped('email')),
set([self.customer.email]))
self.assertEqual(answers.mapped('partner_id'), self.customer)
self.assertEqual(set(answers.mapped('deadline')), set([deadline]))
with self.subTest('Warning when inviting an already invited partner'):
action = self.survey.action_send_survey()
invite_form = Form(self.env[action['res_model']].with_context(action['context']))
invite_form.partner_ids.add(self.customer)
self.assertIn(self.customer, invite_form.existing_partner_ids)
self.assertEqual(invite_form.existing_text,
'The following customers have already received an invite: Caroline Customer.')
@users('survey_manager')
def test_survey_invite_authentication_nosignup(self):
    """ Login required without signup: only partners backed by a user may be
    invited; partners without users and raw email addresses are refused. """
    UserInput = self.env['survey.user_input']
    self.survey.write({'access_mode': 'public', 'users_login_required': True})

    action = self.survey.action_send_survey()
    wizard_form = Form(self.env[action['res_model']].with_context(action['context']))
    # a partner without a user cannot log in, hence cannot be invited
    with self.assertRaises(UserError):
        wizard_form.partner_ids.add(self.customer)
    wizard_form.partner_ids.clear()
    wizard_form.partner_ids.add(self.user_portal.partner_id)
    wizard_form.partner_ids.add(self.user_emp.partner_id)
    # raw emails cannot be mapped to users: refused as well
    with self.assertRaises(UserError):
        wizard_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'
    wizard_form.emails = False
    wizard = wizard_form.save()
    wizard.action_invite()

    created = UserInput.search([('survey_id', '=', self.survey.id)])
    self.assertEqual(len(created), 2)
    self.assertEqual(
        set(created.mapped('email')),
        {self.user_emp.email, self.user_portal.email})
    self.assertEqual(created.mapped('partner_id'), self.user_emp.partner_id | self.user_portal.partner_id)
@users('survey_manager')
def test_survey_invite_authentication_signup(self):
    """ Login required with b2c signup enabled: partners without a user can
    now be invited since they will be able to sign up. """
    self.env["ir.config_parameter"].sudo().set_param('auth_signup.invitation_scope', 'b2c')
    self.env.invalidate_all()

    UserInput = self.env['survey.user_input']
    self.survey.write({'access_mode': 'public', 'users_login_required': True})

    action = self.survey.action_send_survey()
    wizard_form = Form(self.env[action['res_model']].with_context(action['context']))
    wizard_form.partner_ids.add(self.customer)
    wizard_form.partner_ids.add(self.user_portal.partner_id)
    wizard_form.partner_ids.add(self.user_emp.partner_id)
    # TDE FIXME: not sure for emails in authentication + signup
    # wizard_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'
    wizard = wizard_form.save()
    wizard.action_invite()

    created = UserInput.search([('survey_id', '=', self.survey.id)])
    self.assertEqual(len(created), 3)
    self.assertEqual(
        set(created.mapped('email')),
        {self.customer.email, self.user_emp.email, self.user_portal.email})
    self.assertEqual(
        created.mapped('partner_id'),
        self.customer | self.user_emp.partner_id | self.user_portal.partner_id)
@users('survey_manager')
def test_survey_invite_email_from(self):
    """ A dynamic "email_from" on the invite template must be rendered and
    reflected on the outgoing mail. """
    action = self.survey.action_send_survey()
    wizard_form = Form(self.env[action['res_model']].with_context(action['context']))
    wizard_form.partner_ids.add(self.survey_user.partner_id)
    wizard_form.template_id.write({'email_from': '{{ object.partner_id.email_formatted }}'})
    wizard = wizard_form.save()

    with self.mock_mail_gateway():
        wizard.action_invite()

    self.assertEqual(len(self._new_mails), 1, "A new mail.mail should have been created")
    outgoing = self._new_mails[0]
    self.assertEqual(outgoing.email_from, self.survey_user.email_formatted)
@users('survey_manager')
def test_survey_invite_public(self):
    """ Public surveys accept both partners and raw email addresses. """
    UserInput = self.env['survey.user_input']
    self.survey.write({'access_mode': 'public', 'users_login_required': False})

    action = self.survey.action_send_survey()
    wizard_form = Form(self.env[action['res_model']].with_context(action['context']))
    wizard_form.partner_ids.add(self.customer)
    wizard_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'
    wizard = wizard_form.save()
    wizard.action_invite()

    created = UserInput.search([('survey_id', '=', self.survey.id)])
    self.assertEqual(len(created), 3)
    self.assertEqual(
        set(created.mapped('email')),
        {'test1@example.com', '"Raoulette Vignolette" <test2@example.com>', self.customer.email})
    # only the partner-based invite has a partner; email invites do not
    self.assertEqual(created.mapped('partner_id'), self.customer)
@users('survey_manager')
def test_survey_invite_token(self):
    """ Token-protected surveys (no login required) accept both partners and
    raw email addresses, like public ones. """
    UserInput = self.env['survey.user_input']
    self.survey.write({'access_mode': 'token', 'users_login_required': False})

    action = self.survey.action_send_survey()
    wizard_form = Form(self.env[action['res_model']].with_context(action['context']))
    wizard_form.partner_ids.add(self.customer)
    wizard_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'
    wizard = wizard_form.save()
    wizard.action_invite()

    created = UserInput.search([('survey_id', '=', self.survey.id)])
    self.assertEqual(len(created), 3)
    self.assertEqual(
        set(created.mapped('email')),
        {'test1@example.com', '"Raoulette Vignolette" <test2@example.com>', self.customer.email})
    self.assertEqual(created.mapped('partner_id'), self.customer)
@users('survey_manager')
def test_survey_invite_token_internal(self):
    """ Token access restricted to logged-in users: only partners backed by
    an internal user may be invited; partners without users, portal users
    and raw email addresses are all refused. """
    UserInput = self.env['survey.user_input']
    self.survey.write({'access_mode': 'token', 'users_login_required': True})

    action = self.survey.action_send_survey()
    wizard_form = Form(self.env[action['res_model']].with_context(action['context']))
    with self.assertRaises(UserError):  # partner without any user
        wizard_form.partner_ids.add(self.customer)
    with self.assertRaises(UserError):  # portal user
        wizard_form.partner_ids.add(self.user_portal.partner_id)
    wizard_form.partner_ids.clear()
    wizard_form.partner_ids.add(self.user_emp.partner_id)
    with self.assertRaises(UserError):  # raw email addresses
        wizard_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'
    wizard_form.emails = False
    wizard = wizard_form.save()
    wizard.action_invite()

    created = UserInput.search([('survey_id', '=', self.survey.id)])
    self.assertEqual(len(created), 1)
    self.assertEqual(set(created.mapped('email')), {self.user_emp.email})
    self.assertEqual(created.mapped('partner_id'), self.user_emp.partner_id)
def test_survey_invite_token_by_email_nosignup(self):
    """ Several partners may share one email address (with different
    formatting / casing); inviting that address must produce a single
    answer, matched to the first partner. """
    first_partner = self.env['res.partner'].create({
        'name': 'Test 1',
        'email': 'test@example.com',
    })
    # second partner sharing the same address, differently formatted
    self.env['res.partner'].create({
        'name': 'Test 2',
        'email': '"Raoul Poilvache" <TEST@example.COM>',
    })
    self.survey.write({'access_mode': 'token', 'users_login_required': False})

    action = self.survey.action_send_survey()
    wizard_form = Form(self.env[action['res_model']].with_context(action['context']))
    wizard_form.emails = 'test@example.com'
    wizard = wizard_form.save()
    wizard.action_invite()

    created = self.env['survey.user_input'].search([('survey_id', '=', self.survey.id)])
    self.assertEqual(len(created), 1)
    self.assertEqual(created.partner_id.display_name, first_partner.display_name)
@users('survey_user')
def test_survey_invite_with_template_attachment(self):
    """ A survey officer (group_survey_user) must be able to send a survey
    whose invite mail template carries an attachment; the attachment must
    end up on the outgoing mail. """
    mail_template = self.env['mail.template'].create({
        'name': 'test mail template',
        'attachment_ids': [Command.create({
            'name': 'some_attachment.pdf',
            'res_model': 'mail.template',
            'datas': 'test',
            'type': 'binary',
        })],
    })
    user_survey = self.env['survey.survey'].create({
        'title': 'User Created Survey',
        'access_mode': 'public',
        'users_login_required': False,
        'users_can_go_back': False,
        'question_and_page_ids': [
            Command.create({
                'title': 'First page',
                'sequence': 1,
                'is_page': True,
                'question_type': False,
            }),
            Command.create({
                'title': 'Test Free Text',
                'sequence': 2,
                'question_type': 'text_box',
            }),
        ],
    })

    action = user_survey.action_send_survey()
    wizard_form = Form(self.env[action['res_model']].with_context(action['context']))
    wizard_form.template_id = mail_template
    wizard_form.emails = 'test_survey_invite_with_template_attachment@odoo.gov'
    wizard = wizard_form.save()
    with self.mock_mail_gateway():
        wizard.action_invite()

    sent_mail = self.env['mail.mail'].sudo().search([
        ('email_to', '=', 'test_survey_invite_with_template_attachment@odoo.gov')
    ])
    self.assertEqual(sent_mail.attachment_ids, mail_template.attachment_ids)

View file

@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
class TestSurveyRandomize(TransactionCase):
def test_01_generate_randomized_questions(self):
    """ Use random generation for a survey and verify that questions within
    each page are selected according to ``random_questions_count``.

    Page 3 requests 4 random questions but only holds 2: the selection must
    be capped by the number of questions actually available on the page
    (hence 3 + 5 + 2 = 10 questions overall). """
    Question = self.env['survey.question'].sudo()
    question_and_pages = self.env['survey.question']
    # page 1: 5 questions available, 3 to pick
    page_1 = Question.create({
        'title': 'Page 1',
        'is_page': True,
        'question_type': False,
        'sequence': 1,
        'random_questions_count': 3
    })
    question_and_pages |= page_1
    question_and_pages = self._add_questions(question_and_pages, page_1, 5)
    # page 2: 10 questions available, 5 to pick
    page_2 = Question.create({
        'title': 'Page 2',
        'is_page': True,
        'question_type': False,
        'sequence': 100,
        'random_questions_count': 5
    })
    question_and_pages |= page_2
    question_and_pages = self._add_questions(question_and_pages, page_2, 10)
    # page 3: only 2 questions available although 4 are requested
    # (title fixed: was a copy-paste duplicate of 'Page 2')
    page_3 = Question.create({
        'title': 'Page 3',
        'is_page': True,
        'question_type': False,
        'sequence': 1000,
        'random_questions_count': 4
    })
    question_and_pages |= page_3
    question_and_pages = self._add_questions(question_and_pages, page_3, 2)
    self.survey1 = self.env['survey.survey'].sudo().create({
        'title': "S0",
        'question_and_page_ids': [(6, 0, question_and_pages.ids)],
        'questions_selection': 'random'
    })
    generated_questions = self.survey1._prepare_user_input_predefined_questions()
    self.assertEqual(len(generated_questions.ids), 10, msg="Expected 10 unique questions")
    self.assertEqual(len(generated_questions.filtered(lambda question: question.page_id == page_1)), 3, msg="Expected 3 questions in page 1")
    self.assertEqual(len(generated_questions.filtered(lambda question: question.page_id == page_2)), 5, msg="Expected 5 questions in page 2")
    self.assertEqual(len(generated_questions.filtered(lambda question: question.page_id == page_3)), 2, msg="Expected 2 questions in page 3")
def _add_questions(self, question_and_pages, page, count):
    """ Create ``count`` questions placed under ``page`` (via sequence) and
    return the accumulated question/page recordset. """
    Question = self.env['survey.question'].sudo()
    for index in range(1, count + 1):
        question_and_pages |= Question.create({
            'title': page.title + ' Q' + str(index),
            'sequence': page.sequence + index,
        })
    return question_and_pages

View file

@ -0,0 +1,411 @@
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
from odoo.addons.survey.tests import common
from odoo.exceptions import AccessError, UserError
from odoo.tests import tagged
from odoo.tests.common import users, HttpCase
from odoo.tools import mute_logger
@tagged('security')
class TestAccess(common.TestSurveyCommon):
def setUp(self):
    super(TestAccess, self).setUp()
    # One customer answer with a text line and a numerical line: these are
    # the target records exercised by every access test below.
    self.answer_0 = self._add_answer(self.survey, self.customer)
    self.answer_0_0 = self._add_answer_line(self.question_ft, self.answer_0, 'Test Answer')
    self.answer_0_1 = self._add_answer_line(self.question_num, self.answer_0, 5)
@mute_logger('odoo.addons.base.models.ir_model')
@users('user_emp')
def test_access_survey_employee(self):
    """ Plain internal employees have no CRUD access on surveys / questions. """
    # create: refused
    with self.assertRaises(AccessError):
        self.env['survey.survey'].create({'title': 'Test Survey 2'})
    with self.assertRaises(AccessError):
        self.env['survey.question'].create({
            'title': 'My Page', 'sequence': 0, 'is_page': True,
            'question_type': False, 'survey_id': self.survey.id})
    with self.assertRaises(AccessError):
        self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'page_id': self.page_0.id})
    # read: refused
    with self.assertRaises(AccessError):
        self.env['survey.survey'].search([('title', 'ilike', 'Test')])
    with self.assertRaises(AccessError):
        self.survey.with_user(self.env.user).read(['title'])
    # write: refused
    for record in (self.survey, self.page_0):
        with self.assertRaises(AccessError):
            record.with_user(self.env.user).write({'title': 'New Title'})
    with self.assertRaises(AccessError):
        self.question_ft.with_user(self.env.user).write({'question': 'New Title'})
    # unlink: refused
    for record in (self.survey, self.page_0, self.question_ft):
        with self.assertRaises(AccessError):
            record.with_user(self.env.user).unlink()
@mute_logger('odoo.addons.base.models.ir_model')
@users('user_portal')
def test_access_survey_portal(self):
    """ Portal users have no CRUD access on surveys / questions. """
    # create: refused
    with self.assertRaises(AccessError):
        self.env['survey.survey'].create({'title': 'Test Survey 2'})
    with self.assertRaises(AccessError):
        self.env['survey.question'].create({
            'title': 'My Page', 'sequence': 0, 'is_page': True,
            'question_type': False, 'survey_id': self.survey.id})
    with self.assertRaises(AccessError):
        self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'page_id': self.page_0.id})
    # read: refused
    with self.assertRaises(AccessError):
        self.env['survey.survey'].search([('title', 'ilike', 'Test')])
    with self.assertRaises(AccessError):
        self.survey.with_user(self.env.user).read(['title'])
    # write: refused
    for record in (self.survey, self.page_0):
        with self.assertRaises(AccessError):
            record.with_user(self.env.user).write({'title': 'New Title'})
    with self.assertRaises(AccessError):
        self.question_ft.with_user(self.env.user).write({'question': 'New Title'})
    # unlink: refused
    for record in (self.survey, self.page_0, self.question_ft):
        with self.assertRaises(AccessError):
            record.with_user(self.env.user).unlink()
@mute_logger('odoo.addons.base.models.ir_model')
@users('user_public')
def test_access_survey_public(self):
    """ Public (website) users have no CRUD access on surveys / questions. """
    # create: refused
    with self.assertRaises(AccessError):
        self.env['survey.survey'].create({'title': 'Test Survey 2'})
    with self.assertRaises(AccessError):
        self.env['survey.question'].create({
            'title': 'My Page', 'sequence': 0, 'is_page': True,
            'question_type': False, 'survey_id': self.survey.id})
    with self.assertRaises(AccessError):
        self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'page_id': self.page_0.id})
    # read: refused
    with self.assertRaises(AccessError):
        self.env['survey.survey'].search([('title', 'ilike', 'Test')])
    with self.assertRaises(AccessError):
        self.survey.with_user(self.env.user).read(['title'])
    # write: refused
    for record in (self.survey, self.page_0):
        with self.assertRaises(AccessError):
            record.with_user(self.env.user).write({'title': 'New Title'})
    with self.assertRaises(AccessError):
        self.question_ft.with_user(self.env.user).write({'question': 'New Title'})
    # unlink: refused
    for record in (self.survey, self.page_0, self.question_ft):
        with self.assertRaises(AccessError):
            record.with_user(self.env.user).unlink()
@users('survey_manager')
def test_access_survey_survey_manager(self):
    """ Survey managers enjoy full CRUD on any survey and question. """
    # create
    new_survey = self.env['survey.survey'].create({'title': 'Test Survey 2'})
    self.env['survey.question'].create({
        'title': 'My Page', 'sequence': 0, 'is_page': True,
        'question_type': False, 'survey_id': new_survey.id})
    self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'survey_id': new_survey.id})
    # read
    found = self.env['survey.survey'].search([('title', 'ilike', 'Test')])
    self.assertEqual(found, self.survey | new_survey)
    found.read(['title'])
    # write then unlink, on own and foreign surveys alike
    (self.survey | new_survey).write({'title': 'New Title'})
    (self.survey | new_survey).unlink()
@mute_logger('odoo.addons.base.models.ir_model')
@users('survey_user')
def test_access_survey_survey_user(self):
    """ Survey officers: full access on their own surveys, read-only on
    surveys owned by somebody else. """
    # create: own records
    own_survey = self.env['survey.survey'].create({'title': 'Test Survey 2'})
    self.env['survey.question'].create({
        'title': 'My Page', 'sequence': 0, 'is_page': True,
        'question_type': False, 'survey_id': own_survey.id})
    self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'survey_id': own_survey.id})
    # read: all surveys
    found = self.env['survey.survey'].search([('title', 'ilike', 'Test')])
    self.assertEqual(found, self.survey | own_survey)
    found.read(['title'])
    # write: own only
    own_survey.write({'title': 'New Title'})
    with self.assertRaises(AccessError):
        self.survey.with_user(self.env.user).write({'title': 'New Title'})
    # unlink: own only
    own_survey.unlink()
    with self.assertRaises(AccessError):
        self.survey.with_user(self.env.user).unlink()
@mute_logger('odoo.addons.base.models.ir_model')
@users('user_emp')
def test_access_answers_employee(self):
    """ Plain employees have no access at all to survey answers. """
    # create: refused
    with self.assertRaises(AccessError):
        self.env['survey.user_input'].create({'survey_id': self.survey.id})
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].create({
            'question_id': self.question_num.id, 'answer_type': 'numerical_box',
            'value_numerical_box': 3, 'user_input_id': self.answer_0.id})
    # read: refused (search and direct read alike)
    with self.assertRaises(AccessError):
        self.env['survey.user_input'].search([('survey_id', 'in', [self.survey.id])])
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].search([('survey_id', 'in', [self.survey.id])])
    with self.assertRaises(AccessError):
        self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
    # write: refused
    with self.assertRaises(AccessError):
        self.answer_0.with_user(self.env.user).write({'state': 'done'})
    # unlink: refused
    for record in (self.answer_0, self.answer_0_0):
        with self.assertRaises(AccessError):
            record.with_user(self.env.user).unlink()
@mute_logger('odoo.addons.base.models.ir_model')
@users('user_portal')
def test_access_answers_portal(self):
    """ Portal users have no access at all to survey answers. """
    # create: refused
    with self.assertRaises(AccessError):
        self.env['survey.user_input'].create({'survey_id': self.survey.id})
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].create({
            'question_id': self.question_num.id, 'answer_type': 'numerical_box',
            'value_numerical_box': 3, 'user_input_id': self.answer_0.id})
    # read: refused (search and direct read alike)
    with self.assertRaises(AccessError):
        self.env['survey.user_input'].search([('survey_id', 'in', [self.survey.id])])
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].search([('survey_id', 'in', [self.survey.id])])
    with self.assertRaises(AccessError):
        self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
    # write: refused
    with self.assertRaises(AccessError):
        self.answer_0.with_user(self.env.user).write({'state': 'done'})
    # unlink: refused
    for record in (self.answer_0, self.answer_0_0):
        with self.assertRaises(AccessError):
            record.with_user(self.env.user).unlink()
@mute_logger('odoo.addons.base.models.ir_model')
@users('user_public')
def test_access_answers_public(self):
    """ Public users have no access at all to survey answers. """
    # create: refused
    with self.assertRaises(AccessError):
        self.env['survey.user_input'].create({'survey_id': self.survey.id})
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].create({
            'question_id': self.question_num.id, 'answer_type': 'numerical_box',
            'value_numerical_box': 3, 'user_input_id': self.answer_0.id})
    # read: refused (search and direct read alike)
    with self.assertRaises(AccessError):
        self.env['survey.user_input'].search([('survey_id', 'in', [self.survey.id])])
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].search([('survey_id', 'in', [self.survey.id])])
    with self.assertRaises(AccessError):
        self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
    # write: refused
    with self.assertRaises(AccessError):
        self.answer_0.with_user(self.env.user).write({'state': 'done'})
    # unlink: refused
    for record in (self.answer_0, self.answer_0_0):
        with self.assertRaises(AccessError):
            record.with_user(self.env.user).unlink()
@mute_logger('odoo.addons.base.models.ir_model')
@users('survey_user')
def test_access_answers_survey_user(self):
    """ Survey officer: answers of own surveys can be created / written /
    unlinked; answers of other users' surveys are read-only.

    Statement order is deliberate: the failing create() calls are performed
    after the read section (see inline comment about rollback). """
    # own survey (created as survey_user) with a page and a question
    survey_own = self.env['survey.survey'].create({'title': 'Other'})
    self.env['survey.question'].create({'title': 'Other', 'sequence': 0, 'is_page': True, 'question_type': False, 'survey_id': survey_own.id})
    question_own = self.env['survey.question'].create({'title': 'Other Question', 'sequence': 1, 'survey_id': survey_own.id})
    # Create: own survey only
    answer_own = self.env['survey.user_input'].create({'survey_id': survey_own.id})
    # NOTE(review): creating an input *line* raises even on the own survey —
    # presumably line creation is reserved to higher groups; confirm ACLs
    with self.assertRaises(AccessError):
        self.env['survey.user_input.line'].create({'question_id': question_own.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': answer_own.id})
    # Read: always
    answers = self.env['survey.user_input'].search([('survey_id', 'in', [survey_own.id, self.survey.id])])
    self.assertEqual(answers, answer_own | self.answer_0)
    answer_lines = self.env['survey.user_input.line'].search([('survey_id', 'in', [survey_own.id, self.survey.id])])
    self.assertEqual(answer_lines, self.answer_0_0 | self.answer_0_1)
    self.env['survey.user_input'].browse(answer_own.ids).read(['state'])
    self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
    self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
    # Create: own survey only (moved after read because DB not correctly rollbacked with assertRaises)
    # (the assignments below never take effect: create() raises before binding)
    with self.assertRaises(AccessError):
        answer_other = self.env['survey.user_input'].create({'survey_id': self.survey.id})
    with self.assertRaises(AccessError):
        answer_line_other = self.env['survey.user_input.line'].create({'question_id': self.question_num.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': self.answer_0.id})
    # Write: own survey only
    answer_own.write({'state': 'done'})
    with self.assertRaises(AccessError):
        self.answer_0.with_user(self.env.user).write({'state': 'done'})
    # Unlink: own survey only
    answer_own.unlink()
    with self.assertRaises(AccessError):
        self.answer_0.with_user(self.env.user).unlink()
    with self.assertRaises(AccessError):
        self.answer_0_0.with_user(self.env.user).unlink()
@users('survey_manager')
def test_access_answers_survey_manager(self):
    """ Survey managers can CRUD answers of any survey, whether they own the
    survey (self.survey) or not (admin-created survey_other). """
    admin = self.env.ref('base.user_admin')
    with self.with_user(admin.login):
        survey_other = self.env['survey.survey'].create({'title': 'Other'})
        self.env['survey.question'].create({
            'title': 'Other', 'sequence': 0, 'is_page': True,
            'question_type': False, 'survey_id': survey_other.id})
        question_other = self.env['survey.question'].create({'title': 'Other Question', 'sequence': 1, 'survey_id': survey_other.id})
        self.assertEqual(survey_other.create_uid, admin)
        self.assertEqual(question_other.create_uid, admin)
    # create: on both own and foreign surveys
    answer_own = self.env['survey.user_input'].create({'survey_id': self.survey.id})
    answer_other = self.env['survey.user_input'].create({'survey_id': survey_other.id})
    line_own = self.env['survey.user_input.line'].create({
        'question_id': self.question_num.id, 'answer_type': 'numerical_box',
        'value_numerical_box': 3, 'user_input_id': answer_own.id})
    line_other = self.env['survey.user_input.line'].create({
        'question_id': question_other.id, 'answer_type': 'numerical_box',
        'value_numerical_box': 3, 'user_input_id': answer_other.id})
    # read: everything
    answers = self.env['survey.user_input'].search([('survey_id', 'in', [survey_other.id, self.survey.id])])
    self.assertEqual(answers, answer_own | answer_other | self.answer_0)
    answer_lines = self.env['survey.user_input.line'].search([('survey_id', 'in', [survey_other.id, self.survey.id])])
    self.assertEqual(answer_lines, line_own | line_other | self.answer_0_0 | self.answer_0_1)
    self.env['survey.user_input'].browse(answer_own.ids).read(['state'])
    self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
    self.env['survey.user_input.line'].browse(line_own.ids).read(['value_numerical_box'])
    self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
    # write: everything
    answer_own.write({'state': 'done'})
    answer_other.write({'partner_id': self.env.user.partner_id.id})
    # unlink: everything
    (answer_own | answer_other | self.answer_0).unlink()
@tagged('post_install', '-at_install')
class TestSurveySecurityControllers(common.TestSurveyCommon, HttpCase):
def test_survey_start_short(self):
    """ /s/<session_code> short route: reachable with the exact session code
    only, and robust against SQL ``like`` wildcard injection, archived
    surveys and stale session states. """
    # Neutralize sessions from pre-existing data that could clash with our
    # code. The original code only searched and dropped the result on the
    # floor; make the guard effective.
    clashing = self.env['survey.survey'].search([
        ('session_state', 'in', ['ready', 'in_progress'])
    ])
    clashing.write({'session_state': False})
    self.survey.write({
        'session_state': 'ready',
        'session_code': '123456',
        'session_start_time': datetime.datetime.now(),
        'access_mode': 'public',
        'users_login_required': False,
    })
    # right short access token
    response = self.url_open('/s/123456')
    self.assertEqual(response.status_code, 200)
    self.assertIn('The session will begin automatically when the host starts', response.text)
    # `like` operator injection: wildcards must not match the code
    response = self.url_open('/s/______')
    self.assertNotIn(self.survey.title, response.text)
    # right short token, but archived survey
    self.survey.action_archive()
    response = self.url_open('/s/123456')
    self.assertNotIn(self.survey.title, response.text)
    # right short token, but wrong `session_state`
    self.survey.write({'session_state': False, 'active': True})
    response = self.url_open('/s/123456')
    self.assertNotIn(self.survey.title, response.text)
def test_print_survey_access_mode_token(self):
    """ A survey with access_mode='token' must always be printable:
    without questions (empty-survey notice), with questions but no answers,
    and with answers — which appear only when an answer_token is given.

    Fixes typos in the assertion messages ("sruvey", "to shall succeed"). """
    # Case: no questions, no answers -> general print informs "survey is empty"
    survey = self.env['survey.survey'].with_user(self.survey_manager).create({
        'title': 'Test Survey without answers',
        'access_mode': 'token',
        'users_login_required': False,
        'users_can_go_back': False,
    })
    self.authenticate(self.survey_manager.login, self.survey_manager.login)
    response = self.url_open(f'/survey/print/{survey.access_token}')
    self.assertEqual(response.status_code, 200,
        "Print request shall succeed for a survey without questions nor answers")
    self.assertIn("survey is empty", str(response.content),
        "Survey print without questions nor answers should inform user that the survey is empty")
    # Case: a question, no answers -> general print shows the question
    question = self.env['survey.question'].with_user(self.survey_manager).create({
        'title': 'Test Question',
        'survey_id': survey.id,
        'sequence': 1,
        'is_page': False,
        'question_type': 'char_box',
    })
    response = self.url_open(f'/survey/print/{survey.access_token}')
    self.assertEqual(response.status_code, 200,
        "Print request shall succeed for a survey with questions but no answers")
    self.assertIn(question.title, str(response.content),
        "Should be possible to print a survey with a question and without answers")
    # Case: a question, an answer -> general print shows the question only
    user_input = self._add_answer(survey, self.survey_manager.partner_id, state='done')
    self._add_answer_line(question, user_input, "Test Answer")
    response = self.url_open(f'/survey/print/{survey.access_token}')
    self.assertEqual(response.status_code, 200,
        "Print request without answer token, should be possible for a survey with questions and answers")
    self.assertIn(question.title, str(response.content),
        "Survey question should be visible in general print, even when answers exist and no answer_token is provided")
    self.assertNotIn("Test Answer", str(response.content),
        "Survey answer should not be in general print, when no answer_token is provided")
    # Case: a question, an answer -> print with answer_token shows both
    response = self.url_open(f'/survey/print/{survey.access_token}?answer_token={user_input.access_token}')
    self.assertEqual(response.status_code, 200,
        "Should be possible to print a survey with questions and answers")
    self.assertIn(question.title, str(response.content),
        "Question should appear when printing survey with using an answer_token")
    self.assertIn("Test Answer", str(response.content),
        "Answer should appear when printing survey with using an answer_token")

View file

@ -0,0 +1,279 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
from odoo.addons.base.tests.common import HttpCaseWithUserDemo
@odoo.tests.common.tagged('post_install', '-at_install')
class TestUiCertification(HttpCaseWithUserDemo):
def setUp(self):
    """ Build the 'MyCompany Vendor Certification' survey used by the
    certification tours: three pages (Products / Prices / Policies) mixing
    scored simple/multiple choice, numerical, date and datetime questions,
    with a 10-minute time limit and 2 allowed attempts. """
    super(TestUiCertification, self).setUp()
    # NOTE(review): the descriptions below contain HTML-escaped entities
    # (&lt;p&gt;...) unlike other survey fixtures that use raw <p> markup —
    # confirm whether literal entities are intended for these HTML fields.
    # NOTE(review): 'is_time_limited' is set to the string 'limited' —
    # verify against the field definition (a boolean would expect True).
    self.survey_certification = self.env['survey.survey'].create({
        'title': 'MyCompany Vendor Certification',
        'access_token': '4ead4bc8-b8f2-4760-a682-1fde8daaaaac',
        'access_mode': 'public',
        'questions_layout': 'one_page',
        'users_can_go_back': True,
        'users_login_required': True,
        'scoring_type': 'scoring_with_answers',
        'certification': True,
        'certification_mail_template_id': self.env.ref('survey.mail_template_certification').id,
        'is_time_limited': 'limited',
        'time_limit': 10.0,
        'is_attempts_limited': True,
        'attempts_limit': 2,
        'description': """&lt;p&gt;Test your vendor skills!.&lt;/p&gt;""",
        'question_and_page_ids': [
            # --- Page 1: Products ---
            (0, 0, {
                'title': 'Products',
                'sequence': 1,
                'is_page': True,
                'question_type': False,
                'description': '&lt;p&gt;Test your knowledge of your products!&lt;/p&gt;',
            }), (0, 0, {
                'title': 'Do we sell Acoustic Bloc Screens?',
                'sequence': 2,
                'question_type': 'simple_choice',
                'constr_mandatory': True,
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'No',
                        'sequence': 1,
                    }), (0, 0, {
                        'value': 'Yes',
                        'sequence': 2,
                        'is_correct': True,
                        'answer_score': 2,
                    })
                ],
            }), (0, 0, {
                'title': 'Select all the existing products',
                'sequence': 3,
                'question_type': 'multiple_choice',
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'Chair floor protection',
                        'sequence': 1,
                        'is_correct': True,
                        'answer_score': 1,
                    }), (0, 0, {
                        'value': 'Fanta',
                        'sequence': 2,
                        'answer_score': -1,
                    }), (0, 0, {
                        'value': 'Conference chair',
                        'sequence': 3,
                        'is_correct': True,
                        'answer_score': 1,
                    }), (0, 0, {
                        'value': 'Drawer',
                        'sequence': 4,
                        'is_correct': True,
                        'answer_score': 1,
                    }), (0, 0, {
                        'value': 'Customizable Lamp',
                        'sequence': 5,
                        'answer_score': -1,
                    })
                ]
            }), (0, 0, {
                'title': 'Select all the available customizations for our Customizable Desk',
                'sequence': 4,
                'question_type': 'multiple_choice',
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'Color',
                        'sequence': 1,
                        'is_correct': True,
                        'answer_score': 1,
                    }), (0, 0, {
                        'value': 'Height',
                        'sequence': 2,
                        'answer_score': -1,
                    }), (0, 0, {
                        'value': 'Width',
                        'sequence': 3,
                        'is_correct': True,
                        'answer_score': 1,
                    }), (0, 0, {
                        'value': 'Legs',
                        'sequence': 4,
                        'is_correct': True,
                        'answer_score': 1,
                    }), (0, 0, {
                        'value': 'Number of drawers',
                        'sequence': 5,
                        'answer_score': -1,
                    })
                ]
            }), (0, 0, {
                'title': 'How many versions of the Corner Desk do we have?',
                'sequence': 5,
                'question_type': 'simple_choice',
                'constr_mandatory': True,
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 1,
                        'sequence': 1,
                    }), (0, 0, {
                        'value': 2,
                        'sequence': 2,
                        'is_correct': True,
                        'answer_score': 2,
                    }), (0, 0, {
                        'value': 3,
                        'sequence': 3,
                    }), (0, 0, {
                        'value': 4,
                        'sequence': 4,
                    })
                ]
            }), (0, 0, {
                'title': 'Do you think we have missing products in our catalog? (not rated)',
                'sequence': 6,
                'question_type': 'text_box',
            }),
            # --- Page 2: Prices ---
            (0, 0, {
                'title': 'Prices',
                'sequence': 7,
                'is_page': True,
                'question_type': False,
                'description': """&lt;p&gt;Test your knowledge of our prices.&lt;/p&gt;""",
            }), (0, 0, {
                'title': 'How much do we sell our Cable Management Box?',
                'sequence': 8,
                'question_type': 'simple_choice',
                'constr_mandatory': True,
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': '$20',
                        'sequence': 1,
                    }), (0, 0, {
                        'value': '$50',
                        'sequence': 2,
                    }), (0, 0, {
                        'value': '$80',
                        'sequence': 3,
                    }), (0, 0, {
                        'value': '$100',
                        'sequence': 4,
                        'is_correct': True,
                        'answer_score': 2,
                    }), (0, 0, {
                        'value': '$200',
                        'sequence': 5,
                    }), (0, 0, {
                        'value': '$300',
                        'sequence': 6,
                    })
                ]
            }), (0, 0, {
                'title': 'Select all the products that sell for $100 or more',
                'sequence': 9,
                'question_type': 'multiple_choice',
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'Corner Desk Right Sit',
                        'sequence': 1,
                        'answer_score': 1,
                        'is_correct': True,
                    }), (0, 0, {
                        'value': 'Desk Combination',
                        'sequence': 2,
                        'answer_score': 1,
                        'is_correct': True,
                    }), (0, 0, {
                        'value': 'Cabinet with Doors',
                        'sequence': 3,
                        'answer_score': -1,
                    }), (0, 0, {
                        'value': 'Large Desk',
                        'sequence': 4,
                        'answer_score': 1,
                        'is_correct': True,
                    }), (0, 0, {
                        'value': 'Letter Tray',
                        'sequence': 5,
                        'answer_score': -1,
                    }), (0, 0, {
                        'value': 'Office Chair Black',
                        'sequence': 6,
                        'answer_score': -1,
                    }),
                ]
            }), (0, 0, {
                'title': 'What do you think about our prices (not rated)?',
                'sequence': 10,
                'question_type': 'simple_choice',
                'constr_mandatory': True,
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'Very underpriced',
                        'sequence': 1,
                    }), (0, 0, {
                        'value': 'Underpriced',
                        'sequence': 2,
                    }), (0, 0, {
                        'value': 'Correctly priced',
                        'sequence': 3,
                    }), (0, 0, {
                        'value': 'A little bit overpriced',
                        'sequence': 4,
                    }), (0, 0, {
                        'value': 'A lot overpriced',
                        'sequence': 5,
                    })
                ]
            }),
            # --- Page 3: Policies ---
            (0, 0, {
                'title': 'Policies',
                'sequence': 11,
                'is_page': True,
                'question_type': False,
                'description': """&lt;p&gt;Test your knowledge of our policies.&lt;/p&gt;""",
            }), (0, 0, {
                'title': 'How many days is our money-back guarantee?',
                'sequence': 12,
                'question_type': 'numerical_box',
                'constr_mandatory': True,
                'is_scored_question': True,
                'answer_numerical_box': 30,
                'answer_score': 1,
            }), (0, 0, {
                'title': 'If a customer purchases a product on 6 January 2020, what is the latest day we expect to ship it?',
                'sequence': 13,
                'question_type': 'date',
                'is_scored_question': True,
                'answer_date': '2020-01-08',
                'answer_score': 1,
            }), (0, 0, {
                'title': 'If a customer purchases a 1 year warranty on 6 January 2020, when do we expect the warranty to expire?',
                'sequence': 14,
                'question_type': 'datetime',
                'is_scored_question': True,
                'answer_datetime': '2021-01-07 00:00:01',
                'answer_score': 1,
            }), (0, 0, {
                'title': 'What day to you think is best for us to start having an annual sale (not rated)?',
                'sequence': 15,
                'question_type': 'date',
            }), (0, 0, {
                'title': 'What day and time do you think most customers are most likely to call customer service (not rated)?',
                'sequence': 16,
                'question_type': 'datetime',
            }), (0, 0, {
                'title': 'How many chairs do you think we should aim to sell in a year (not rated)?',
                'sequence': 17,
                'question_type': 'numerical_box',
            })
        ]
    })
def test_04_certification_success_tour(self):
    """Run the JS tour where the demo user passes the certification."""
    url = "/survey/start/%s" % self.survey_certification.access_token
    self.start_tour(url, 'test_certification_success', login="demo")
def test_05_certification_failure_tour(self):
    """Run the JS tour where the demo user fails the certification."""
    url = "/survey/start/%s" % self.survey_certification.access_token
    self.start_tour(url, 'test_certification_failure', login="demo")

View file

@ -0,0 +1,268 @@
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
from odoo.addons.base.tests.common import HttpCaseWithUserDemo
@odoo.tests.common.tagged('post_install', '-at_install')
class TestUiFeedback(HttpCaseWithUserDemo):
def setUp(self):
    """Create the public 'User Feedback Form' survey used by the feedback tours.

    The survey is public, allows going back, and uses one page per section.
    Question titles, answer values and mandatory flags must stay in sync with
    the 'test_survey' / 'test_survey_prefill' JS tours that step through them.
    """
    super(TestUiFeedback, self).setUp()

    self.survey_feedback = self.env['survey.survey'].create({
        'title': 'User Feedback Form',
        'access_token': 'b137640d-14d4-4748-9ef6-344caaaaaae',
        'access_mode': 'public',
        'users_can_go_back': True,
        'questions_layout': 'page_per_section',
        # Bug fix: removed a stray '</field>' closing tag at the end of the
        # description; it was a leftover from copy/pasting XML data and made
        # the stored HTML invalid.
        'description': """<p>This survey allows you to give a feedback about your experience with our eCommerce solution.
Filling it helps us improving your experience.</p>""",
        'question_and_page_ids': [
            # --- Page 1: demographic questions ---------------------------
            (0, 0, {
                'title': 'General information',
                'sequence': 1,
                'question_type': False,
                'is_page': True,
                'description': """<p>This section is about general information about you. Answering them helps qualifying your answers.</p>""",
            }), (0, 0, {
                'title': 'Where do you live ?',
                'sequence': 2,
                'question_type': 'char_box',
                'constr_mandatory': False,
            }), (0, 0, {
                'title': 'When is your date of birth ?',
                'sequence': 3,
                'question_type': 'date',
                'description': False,
            }), (0, 0, {
                # A comment on this question counts as a valid answer
                # (comment_count_as_answer), unlike question 7 below.
                'title': 'How frequently do you buy products online ?',
                'sequence': 4,
                'question_type': 'simple_choice',
                'comments_allowed': True,
                'comment_count_as_answer': True,
                'constr_mandatory': True,
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'Once a day',
                        'sequence': 1,
                    }), (0, 0, {
                        'value': 'Once a week',
                        'sequence': 2,
                    }), (0, 0, {
                        'value': 'Once a month',
                        'sequence': 3,
                    }), (0, 0, {
                        'value': 'Once a year',
                        'sequence': 4,
                    }), (0, 0, {
                        'value': 'Other (answer in comment)',
                        'sequence': 5,
                    })],
            }), (0, 0, {
                'title': 'How many times did you order products on our website ?',
                'sequence': 5,
                'question_type': 'numerical_box',
                'constr_mandatory': True,
            }),
            # --- Page 2: eCommerce experience -----------------------------
            (0, 0, {
                'title': 'About our ecommerce',
                'sequence': 6,
                'is_page': True,
                'question_type': False,
                'description': """<p>This section is about our eCommerce experience itself.</p>""",
            }), (0, 0, {
                # Here the comment does NOT count as an answer: at least one
                # suggested answer must be ticked to satisfy the constraint.
                'title': 'Which of the following words would you use to describe our products ?',
                'sequence': 7,
                'question_type': 'multiple_choice',
                'constr_mandatory': True,
                'comments_allowed': True,
                'comment_count_as_answer': False,
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'High quality',
                        'sequence': 1,
                    }), (0, 0, {
                        'value': 'Useful',
                        'sequence': 2,
                    }), (0, 0, {
                        'value': 'Unique',
                        'sequence': 3,
                    }), (0, 0, {
                        'value': 'Good value for money',
                        'sequence': 4,
                    }), (0, 0, {
                        'value': 'Overpriced',
                        'sequence': 5,
                    }), (0, 0, {
                        'value': 'Impractical',
                        'sequence': 6,
                    }), (0, 0, {
                        'value': 'Ineffective',
                        'sequence': 7,
                    }), (0, 0, {
                        'value': 'Poor quality',
                        'sequence': 8,
                    }), (0, 0, {
                        'value': 'Other',
                        'sequence': 9,
                    })],
            }), (0, 0, {
                # Matrix with multiple answers allowed per row
                # (matrix_subtype 'multiple'); columns are the suggested
                # answers, rows are the statements to rate.
                'title': 'What do your think about our new eCommerce ?',
                'sequence': 8,
                'question_type': 'matrix',
                'matrix_subtype': 'multiple',
                'constr_mandatory': True,
                'suggested_answer_ids': [(0, 0, {
                    'value': 'Totally disagree',
                    'sequence': 1
                }), (0, 0, {
                    'value': 'Disagree',
                    'sequence': 2,
                }), (0, 0, {
                    'value': 'Agree',
                    'sequence': 3,
                }), (0, 0, {
                    'value': 'Totally agree',
                    'sequence': 4,
                })],
                'matrix_row_ids': [(0, 0, {
                    'value': 'The new layout and design is fresh and up-to-date',
                    'sequence': 1,
                }), (0, 0, {
                    'value': 'It is easy to find the product that I want',
                    'sequence': 2,
                }), (0, 0, {
                    'value': 'The tool to compare the products is useful to make a choice',
                    'sequence': 3,
                }), (0, 0, {
                    'value': 'The checkout process is clear and secure',
                    'sequence': 4,
                }), (0, 0, {
                    'value': 'I have added products to my wishlist',
                    'sequence': 5,
                })],
            }), (0, 0, {
                'title': 'Do you have any other comments, questions, or concerns ?',
                'sequence': 9,
                'question_type': 'text_box',
                'constr_mandatory': False,
            })
        ],
    })
def test_01_admin_survey_tour(self):
    """Run the feedback-survey JS tour as the admin user."""
    url = "/survey/start/%s" % self.survey_feedback.access_token
    self.start_tour(url, 'test_survey', login="admin")
def test_02_demo_survey_tour(self):
    """Run the feedback-survey JS tour as the demo user."""
    url = "/survey/start/%s" % self.survey_feedback.access_token
    self.start_tour(url, 'test_survey', login="demo")
def test_03_public_survey_tour(self):
    """Run the feedback-survey JS tour as an anonymous (public) visitor."""
    url = "/survey/start/%s" % self.survey_feedback.access_token
    self.start_tour(url, 'test_survey')
def test_04_public_survey_with_triggers(self):
    """ Check that chained conditional questions are correctly
    hidden from survey when a previously selected triggering answer is
    unselected. E.g., if a specific answer for "Question 1" is selected,
    which triggers asking "Question 2", and a specific answer for
    "Question 2" is selected and triggers asking "Question 3",
    changing the selected answer for "Question 1" should:
    * hide questions 2 and 3
    * enable submitting the survey without answering questions 2 and 3,
    even if "constr_mandatory=True", as they are not visible.
    """
    # One-page public survey so every (conditional) question is rendered on
    # the same page and the tour can observe show/hide without navigating.
    survey_with_triggers = self.env['survey.survey'].create({
        'title': 'Survey With Triggers',
        'access_token': '3cfadce3-3f7e-41da-920d-10fa0eb19527',
        'access_mode': 'public',
        'users_can_go_back': True,
        'questions_layout': 'one_page',
        'description': "<p>Test survey with conditional questions</p>",
        'question_and_page_ids': [
            # Q1: always visible, drives all triggers below.
            (0, 0, {
                'title': 'Q1',
                'sequence': 1,
                'question_type': 'simple_choice',
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'Answer 1',
                        'sequence': 1,
                    }), (0, 0, {
                        'value': 'Answer 2',
                        'sequence': 2,
                    }), (0, 0, {
                        'value': 'Answer 3',
                        'sequence': 3,
                    })
                ],
                'constr_mandatory': True,
            }),
            # Q2: conditional AND mandatory — mandatory must only apply
            # while the question is actually visible.
            (0, 0, {
                'title': 'Q2',
                'sequence': 2,
                'question_type': 'simple_choice',
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'Answer 1',
                        'sequence': 1,
                    }), (0, 0, {
                        'value': 'Answer 2',
                        'sequence': 2,
                    })
                ],
                'is_conditional': True,
                'constr_mandatory': True,
            }),
            # Q3: second link of the chain, triggered by an answer of Q2.
            (0, 0, {
                'title': 'Q3',
                'sequence': 3,
                'question_type': 'simple_choice',
                'suggested_answer_ids': [
                    (0, 0, {
                        'value': 'Answer 1',
                        'sequence': 1,
                    }), (0, 0, {
                        'value': 'Answer 2',
                        'sequence': 2,
                    })
                ],
                'is_conditional': True,
                'constr_mandatory': True,
            }),
            # Q4/Q5: two siblings triggered by the same Q1 answer, one
            # mandatory and one not.
            (0, 0, {
                'title': 'Q4',
                'sequence': 4,
                'question_type': 'numerical_box',
                'is_conditional': True,
                'constr_mandatory': True,
            }), (0, 0, {
                'title': 'Q5',
                'sequence': 5,
                'question_type': 'numerical_box',
                'is_conditional': True,
            })
        ]
    })
    # question_and_page_ids is sequence-ordered, so this unpacks Q1..Q5 in
    # the creation order above; same for the suggested answers.
    q1, q2, q3, q4, q5 = survey_with_triggers.question_and_page_ids
    q1_a1, q1_a2, __ = q1.suggested_answer_ids
    q2_a1 = q2.suggested_answer_ids[0]
    # Wire the chain: Q1 == 'Answer 1' shows Q2; Q2 == 'Answer 1' shows Q3.
    q2.triggering_question_id = q1
    q2.triggering_answer_id = q1_a1
    q3.triggering_question_id = q2
    q3.triggering_answer_id = q2_a1
    # Q4 and Q5 are both shown by Q1 == 'Answer 2' (the alternative branch).
    q4.triggering_question_id = q1
    q4.triggering_answer_id = q1_a2
    q5.triggering_question_id = q1
    q5.triggering_answer_id = q1_a2
    access_token = survey_with_triggers.access_token
    # No login: the tour runs as a public visitor (access_mode 'public').
    self.start_tour("/survey/start/%s" % access_token, 'test_survey_chained_conditional_questions')
def test_06_survey_prefill(self):
    """Run the JS tour checking that previous answers are pre-filled."""
    url = "/survey/start/%s" % self.survey_feedback.access_token
    self.start_tour(url, 'test_survey_prefill')