mirror of
https://github.com/bringout/oca-ocb-core.git
synced 2026-04-22 12:32:00 +02:00
19.0 vanilla
This commit is contained in:
parent
d1963a3c3a
commit
2d3ee4855a
7430 changed files with 2687981 additions and 2965473 deletions
|
|
@ -3,9 +3,10 @@
|
|||
|
||||
import collections
|
||||
import contextlib
|
||||
import json
|
||||
import itertools
|
||||
import json
|
||||
import operator
|
||||
from textwrap import shorten
|
||||
|
||||
from odoo import api, fields, models, tools, _
|
||||
from odoo.exceptions import UserError, ValidationError
|
||||
|
|
@ -30,7 +31,7 @@ class SurveyQuestion(models.Model):
|
|||
items around.
|
||||
|
||||
It also removes on level of encoding by directly having 'Add a page' and 'Add a question'
|
||||
links on the tree view of questions, enabling a faster encoding.
|
||||
links on the list view of questions, enabling a faster encoding.
|
||||
|
||||
However, this has the downside of making the code reading a little bit more complicated.
|
||||
Efforts were made at the model level to create computed fields so that the use of these models
|
||||
|
|
@ -46,6 +47,17 @@ class SurveyQuestion(models.Model):
|
|||
_rec_name = 'title'
|
||||
_order = 'sequence,id'
|
||||
|
||||
@api.model
|
||||
def default_get(self, fields):
|
||||
res = super().default_get(fields)
|
||||
if default_survey_id := self.env.context.get('default_survey_id'):
|
||||
survey = self.env['survey.survey'].browse(default_survey_id)
|
||||
if 'is_time_limited' in fields and 'is_time_limited' not in res:
|
||||
res['is_time_limited'] = survey.session_speed_rating
|
||||
if 'time_limit' in fields and 'time_limit' not in res:
|
||||
res['time_limit'] = survey.session_speed_rating_time_limit
|
||||
return res
|
||||
|
||||
# question generic data
|
||||
title = fields.Char('Title', required=True, translate=True)
|
||||
description = fields.Html(
|
||||
|
|
@ -54,9 +66,13 @@ class SurveyQuestion(models.Model):
|
|||
question_placeholder = fields.Char("Placeholder", translate=True, compute="_compute_question_placeholder", store=True, readonly=False)
|
||||
background_image = fields.Image("Background Image", compute="_compute_background_image", store=True, readonly=False)
|
||||
background_image_url = fields.Char("Background Url", compute="_compute_background_image_url")
|
||||
survey_id = fields.Many2one('survey.survey', string='Survey', ondelete='cascade')
|
||||
survey_id = fields.Many2one('survey.survey', string='Survey', ondelete='cascade', index='btree_not_null')
|
||||
scoring_type = fields.Selection(related='survey_id.scoring_type', string='Scoring Type', readonly=True)
|
||||
sequence = fields.Integer('Sequence', default=10)
|
||||
session_available = fields.Boolean(related='survey_id.session_available', string='Live Session available', readonly=True)
|
||||
survey_session_speed_rating = fields.Boolean(related="survey_id.session_speed_rating")
|
||||
survey_session_speed_rating_time_limit = fields.Integer(related="survey_id.session_speed_rating_time_limit", string="General Time limit (seconds)")
|
||||
|
||||
# page specific
|
||||
is_page = fields.Boolean('Is a page?')
|
||||
question_ids = fields.One2many('survey.question', string='Questions', compute="_compute_question_ids")
|
||||
|
|
@ -74,6 +90,7 @@ class SurveyQuestion(models.Model):
|
|||
('text_box', 'Multiple Lines Text Box'),
|
||||
('char_box', 'Single Line Text Box'),
|
||||
('numerical_box', 'Numerical Value'),
|
||||
('scale', 'Scale'),
|
||||
('date', 'Date'),
|
||||
('datetime', 'Datetime'),
|
||||
('matrix', 'Matrix')], string='Question Type',
|
||||
|
|
@ -82,6 +99,8 @@ class SurveyQuestion(models.Model):
|
|||
'Scored', compute='_compute_is_scored_question',
|
||||
readonly=False, store=True, copy=True,
|
||||
help="Include this question as part of quiz scoring. Requires an answer and answer score to be taken into account.")
|
||||
has_image_only_suggested_answer = fields.Boolean(
|
||||
"Has image only suggested answer", compute='_compute_has_image_only_suggested_answer')
|
||||
# -- scoreable/answerable simple answer_types: numerical_box / date / datetime
|
||||
answer_numerical_box = fields.Float('Correct numerical answer', help="Correct number answer for this question.")
|
||||
answer_date = fields.Date('Correct date answer', help="Correct date answer for this question.")
|
||||
|
|
@ -105,9 +124,16 @@ class SurveyQuestion(models.Model):
|
|||
matrix_row_ids = fields.One2many(
|
||||
'survey.question.answer', 'matrix_question_id', string='Matrix Rows', copy=True,
|
||||
help='Labels used for proposed choices: rows of matrix')
|
||||
# -- scale
|
||||
scale_min = fields.Integer("Scale Minimum Value", default=0)
|
||||
scale_max = fields.Integer("Scale Maximum Value", default=10)
|
||||
scale_min_label = fields.Char("Scale Minimum Label", translate=True)
|
||||
scale_mid_label = fields.Char("Scale Middle Label", translate=True)
|
||||
scale_max_label = fields.Char("Scale Maximum Label", translate=True)
|
||||
# -- display & timing options
|
||||
is_time_limited = fields.Boolean("The question is limited in time",
|
||||
help="Currently only supported for live sessions.")
|
||||
is_time_customized = fields.Boolean("Customized speed rewards")
|
||||
time_limit = fields.Integer("Time limit (seconds)")
|
||||
# -- comments (simple choice, multiple choice, matrix (without count as an answer))
|
||||
comments_allowed = fields.Boolean('Show Comments Field')
|
||||
|
|
@ -124,7 +150,7 @@ class SurveyQuestion(models.Model):
|
|||
validation_max_date = fields.Date('Maximum Date')
|
||||
validation_min_datetime = fields.Datetime('Minimum Datetime')
|
||||
validation_max_datetime = fields.Datetime('Maximum Datetime')
|
||||
validation_error_msg = fields.Char('Validation Error message', translate=True)
|
||||
validation_error_msg = fields.Char('Validation Error', translate=True)
|
||||
constr_mandatory = fields.Boolean('Mandatory Answer')
|
||||
constr_error_msg = fields.Char('Error message', translate=True)
|
||||
# answers
|
||||
|
|
@ -132,36 +158,73 @@ class SurveyQuestion(models.Model):
|
|||
'survey.user_input.line', 'question_id', string='Answers',
|
||||
domain=[('skipped', '=', False)], groups='survey.group_survey_user')
|
||||
|
||||
# Conditional display
|
||||
is_conditional = fields.Boolean(
|
||||
string='Conditional Display', copy=True, help="""If checked, this question will be displayed only
|
||||
if the specified conditional answer have been selected in a previous question""")
|
||||
triggering_question_id = fields.Many2one(
|
||||
'survey.question', string="Triggering Question", copy=False, compute="_compute_triggering_question_id",
|
||||
store=True, readonly=False, help="Question containing the triggering answer to display the current question.",
|
||||
domain="[('survey_id', '=', survey_id), \
|
||||
'&', ('question_type', 'in', ['simple_choice', 'multiple_choice']), \
|
||||
'|', \
|
||||
('sequence', '<', sequence), \
|
||||
'&', ('sequence', '=', sequence), ('id', '<', id)]")
|
||||
triggering_answer_id = fields.Many2one(
|
||||
'survey.question.answer', string="Triggering Answer", copy=False, compute="_compute_triggering_answer_id",
|
||||
store=True, readonly=False, help="Answer that will trigger the display of the current question.",
|
||||
domain="[('question_id', '=', triggering_question_id)]")
|
||||
# Not stored, convenient for trigger display computation.
|
||||
triggering_question_ids = fields.Many2many(
|
||||
'survey.question', string="Triggering Questions", compute="_compute_triggering_question_ids",
|
||||
store=False, help="Questions containing the triggering answer(s) to display the current question.")
|
||||
|
||||
_sql_constraints = [
|
||||
('positive_len_min', 'CHECK (validation_length_min >= 0)', 'A length must be positive!'),
|
||||
('positive_len_max', 'CHECK (validation_length_max >= 0)', 'A length must be positive!'),
|
||||
('validation_length', 'CHECK (validation_length_min <= validation_length_max)', 'Max length cannot be smaller than min length!'),
|
||||
('validation_float', 'CHECK (validation_min_float_value <= validation_max_float_value)', 'Max value cannot be smaller than min value!'),
|
||||
('validation_date', 'CHECK (validation_min_date <= validation_max_date)', 'Max date cannot be smaller than min date!'),
|
||||
('validation_datetime', 'CHECK (validation_min_datetime <= validation_max_datetime)', 'Max datetime cannot be smaller than min datetime!'),
|
||||
('positive_answer_score', 'CHECK (answer_score >= 0)', 'An answer score for a non-multiple choice question cannot be negative!'),
|
||||
('scored_datetime_have_answers', "CHECK (is_scored_question != True OR question_type != 'datetime' OR answer_datetime is not null)",
|
||||
'All "Is a scored question = True" and "Question Type: Datetime" questions need an answer'),
|
||||
('scored_date_have_answers', "CHECK (is_scored_question != True OR question_type != 'date' OR answer_date is not null)",
|
||||
'All "Is a scored question = True" and "Question Type: Date" questions need an answer')
|
||||
]
|
||||
allowed_triggering_question_ids = fields.Many2many(
|
||||
'survey.question', string="Allowed Triggering Questions", copy=False, compute="_compute_allowed_triggering_question_ids")
|
||||
is_placed_before_trigger = fields.Boolean(
|
||||
string='Is misplaced?', help="Is this question placed before any of its trigger questions?",
|
||||
compute="_compute_allowed_triggering_question_ids")
|
||||
triggering_answer_ids = fields.Many2many(
|
||||
'survey.question.answer', string="Triggering Answers", copy=False, store=True,
|
||||
readonly=False, help="Picking any of these answers will trigger this question.\n"
|
||||
"Leave the field empty if the question should always be displayed.",
|
||||
domain="""[
|
||||
('question_id.survey_id', '=', survey_id),
|
||||
'&', ('question_id.question_type', 'in', ['simple_choice', 'multiple_choice']),
|
||||
'|',
|
||||
('question_id.sequence', '<', sequence),
|
||||
'&', ('question_id.sequence', '=', sequence), ('question_id.id', '<', id)
|
||||
]"""
|
||||
)
|
||||
|
||||
_positive_len_min = models.Constraint(
|
||||
'CHECK (validation_length_min >= 0)',
|
||||
'A length must be positive!',
|
||||
)
|
||||
_positive_len_max = models.Constraint(
|
||||
'CHECK (validation_length_max >= 0)',
|
||||
'A length must be positive!',
|
||||
)
|
||||
_validation_length = models.Constraint(
|
||||
'CHECK (validation_length_min <= validation_length_max)',
|
||||
'Max length cannot be smaller than min length!',
|
||||
)
|
||||
_validation_float = models.Constraint(
|
||||
'CHECK (validation_min_float_value <= validation_max_float_value)',
|
||||
'Max value cannot be smaller than min value!',
|
||||
)
|
||||
_validation_date = models.Constraint(
|
||||
'CHECK (validation_min_date <= validation_max_date)',
|
||||
'Max date cannot be smaller than min date!',
|
||||
)
|
||||
_validation_datetime = models.Constraint(
|
||||
'CHECK (validation_min_datetime <= validation_max_datetime)',
|
||||
'Max datetime cannot be smaller than min datetime!',
|
||||
)
|
||||
_positive_answer_score = models.Constraint(
|
||||
'CHECK (answer_score >= 0)',
|
||||
'An answer score for a non-multiple choice question cannot be negative!',
|
||||
)
|
||||
_scored_datetime_have_answers = models.Constraint(
|
||||
"CHECK (is_scored_question != True OR question_type != 'datetime' OR answer_datetime is not null)",
|
||||
'All "Is a scored question = True" and "Question Type: Datetime" questions need an answer',
|
||||
)
|
||||
_scored_date_have_answers = models.Constraint(
|
||||
"CHECK (is_scored_question != True OR question_type != 'date' OR answer_date is not null)",
|
||||
'All "Is a scored question = True" and "Question Type: Date" questions need an answer',
|
||||
)
|
||||
_scale = models.Constraint(
|
||||
"CHECK (question_type != 'scale' OR (scale_min >= 0 AND scale_max <= 10 AND scale_min < scale_max))",
|
||||
'The scale must be a growing non-empty range between 0 and 10 (inclusive)',
|
||||
)
|
||||
_is_time_limited_have_time_limit = models.Constraint(
|
||||
'CHECK (is_time_limited != TRUE OR time_limit IS NOT NULL AND time_limit > 0)',
|
||||
'All time-limited questions need a positive time limit',
|
||||
)
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# CONSTRAINT METHODS
|
||||
|
|
@ -177,6 +240,13 @@ class SurveyQuestion(models.Model):
|
|||
# COMPUTE METHODS
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
@api.depends('suggested_answer_ids', 'suggested_answer_ids.value')
|
||||
def _compute_has_image_only_suggested_answer(self):
|
||||
questions_with_image_only_answer = self.env['survey.question'].search(
|
||||
[('id', 'in', self.ids), ('suggested_answer_ids.value', 'in', [False, ''])])
|
||||
questions_with_image_only_answer.has_image_only_suggested_answer = True
|
||||
(self - questions_with_image_only_answer).has_image_only_suggested_answer = False
|
||||
|
||||
@api.depends('question_type')
|
||||
def _compute_question_placeholder(self):
|
||||
for question in self:
|
||||
|
|
@ -220,19 +290,10 @@ class SurveyQuestion(models.Model):
|
|||
|
||||
@api.depends('survey_id.question_and_page_ids.is_page', 'survey_id.question_and_page_ids.sequence')
|
||||
def _compute_question_ids(self):
|
||||
"""Will take all questions of the survey for which the index is higher than the index of this page
|
||||
and lower than the index of the next page."""
|
||||
for question in self:
|
||||
if question.is_page:
|
||||
next_page_index = False
|
||||
for page in question.survey_id.page_ids:
|
||||
if page._index() > question._index():
|
||||
next_page_index = page._index()
|
||||
break
|
||||
|
||||
question.question_ids = question.survey_id.question_ids.filtered(
|
||||
lambda q: q._index() > question._index() and (not next_page_index or q._index() < next_page_index)
|
||||
)
|
||||
question.question_ids = question.survey_id.question_ids\
|
||||
.filtered(lambda q: q.page_id == question).sorted(lambda q: q._index())
|
||||
else:
|
||||
question.question_ids = self.env['survey.question']
|
||||
|
||||
|
|
@ -269,33 +330,59 @@ class SurveyQuestion(models.Model):
|
|||
if not question.validation_required or question.question_type not in ['char_box', 'numerical_box', 'date', 'datetime']:
|
||||
question.validation_required = False
|
||||
|
||||
@api.depends('is_conditional')
|
||||
def _compute_triggering_question_id(self):
|
||||
""" Used as an 'onchange' : Reset the triggering question if user uncheck 'Conditional Display'
|
||||
Avoid CacheMiss : set the value to False if the value is not set yet."""
|
||||
for question in self:
|
||||
if not question.is_conditional or question.triggering_question_id is None:
|
||||
question.triggering_question_id = False
|
||||
@api.depends('survey_id', 'survey_id.question_ids', 'triggering_answer_ids')
|
||||
def _compute_allowed_triggering_question_ids(self):
|
||||
"""Although the question (and possible trigger questions) sequence
|
||||
is used here, we do not add these fields to the dependency list to
|
||||
avoid cascading rpc calls when reordering questions via the webclient.
|
||||
"""
|
||||
possible_trigger_questions = self.search([
|
||||
('is_page', '=', False),
|
||||
('question_type', 'in', ['simple_choice', 'multiple_choice']),
|
||||
('suggested_answer_ids', '!=', False),
|
||||
('survey_id', 'in', self.survey_id.ids)
|
||||
])
|
||||
# Using the sequence stored in db is necessary for existing questions that are passed as
|
||||
# NewIds because the sequence provided by the JS client can be incorrect.
|
||||
(self | possible_trigger_questions).flush_recordset()
|
||||
self.env.cr.execute(
|
||||
"SELECT id, sequence FROM survey_question WHERE id =ANY(%s)",
|
||||
[self.ids]
|
||||
)
|
||||
conditional_questions_sequences = dict(self.env.cr.fetchall()) # id: sequence mapping
|
||||
|
||||
@api.depends('triggering_question_id')
|
||||
def _compute_triggering_answer_id(self):
|
||||
""" Used as an 'onchange' : Reset the triggering answer if user unset or change the triggering question
|
||||
or uncheck 'Conditional Display'.
|
||||
Avoid CacheMiss : set the value to False if the value is not set yet."""
|
||||
for question in self:
|
||||
if not question.triggering_question_id \
|
||||
or question.triggering_question_id != question.triggering_answer_id.question_id\
|
||||
or question.triggering_answer_id is None:
|
||||
question.triggering_answer_id = False
|
||||
question_id = question._origin.id
|
||||
if not question_id: # New question
|
||||
question.allowed_triggering_question_ids = possible_trigger_questions.filtered(
|
||||
lambda q: q.survey_id.id == question.survey_id._origin.id)
|
||||
question.is_placed_before_trigger = False
|
||||
continue
|
||||
|
||||
@api.depends('question_type', 'scoring_type', 'answer_date', 'answer_datetime', 'answer_numerical_box')
|
||||
question_sequence = conditional_questions_sequences[question_id]
|
||||
|
||||
question.allowed_triggering_question_ids = possible_trigger_questions.filtered(
|
||||
lambda q: q.survey_id.id == question.survey_id._origin.id
|
||||
and (q.sequence < question_sequence or q.sequence == question_sequence and q.id < question_id)
|
||||
)
|
||||
question.is_placed_before_trigger = bool(
|
||||
set(question.triggering_answer_ids.question_id.ids)
|
||||
- set(question.allowed_triggering_question_ids.ids) # .ids necessary to match ids with newIds
|
||||
)
|
||||
|
||||
@api.depends('triggering_answer_ids')
|
||||
def _compute_triggering_question_ids(self):
|
||||
for question in self:
|
||||
question.triggering_question_ids = question.triggering_answer_ids.question_id
|
||||
|
||||
@api.depends('question_type', 'scoring_type', 'answer_date', 'answer_datetime', 'answer_numerical_box', 'suggested_answer_ids.is_correct')
|
||||
def _compute_is_scored_question(self):
|
||||
""" Computes whether a question "is scored" or not. Handles following cases:
|
||||
- inconsistent Boolean=None edge case that breaks tests => False
|
||||
- survey is not scored => False
|
||||
- 'date'/'datetime'/'numerical_box' question types w/correct answer => True
|
||||
(implied without user having to activate, except for numerical whose correct value is 0.0)
|
||||
- 'simple_choice / multiple_choice': set to True even if logic is a bit different (coming from answers)
|
||||
- 'simple_choice / multiple_choice': set to True if any of suggested answers are marked as correct
|
||||
- question_type isn't scoreable (note: choice questions scoring logic handled separately) => False
|
||||
"""
|
||||
for question in self:
|
||||
|
|
@ -308,14 +395,45 @@ class SurveyQuestion(models.Model):
|
|||
elif question.question_type == 'numerical_box' and question.answer_numerical_box:
|
||||
question.is_scored_question = True
|
||||
elif question.question_type in ['simple_choice', 'multiple_choice']:
|
||||
question.is_scored_question = True
|
||||
question.is_scored_question = any(question.suggested_answer_ids.mapped('is_correct'))
|
||||
else:
|
||||
question.is_scored_question = False
|
||||
|
||||
@api.onchange('question_type', 'validation_required')
|
||||
def _onchange_validation_parameters(self):
|
||||
"""Ensure no value stays set but not visible on form,
|
||||
preventing saving (+consistency with question type)."""
|
||||
self.validation_email = False
|
||||
self.validation_length_min = 0
|
||||
self.validation_length_max = 0
|
||||
self.validation_min_date = False
|
||||
self.validation_max_date = False
|
||||
self.validation_min_datetime = False
|
||||
self.validation_max_datetime = False
|
||||
self.validation_min_float_value = 0
|
||||
self.validation_max_float_value = 0
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# CRUD
|
||||
# ------------------------------------------------------------
|
||||
|
||||
def copy(self, default=None):
|
||||
new_questions = super().copy(default)
|
||||
for old_question, new_question in zip(self, new_questions):
|
||||
if old_question.triggering_answer_ids:
|
||||
new_question.triggering_answer_ids = old_question.triggering_answer_ids
|
||||
return new_questions
|
||||
|
||||
@api.model_create_multi
|
||||
def create(self, vals_list):
|
||||
questions = super().create(vals_list)
|
||||
questions.filtered(
|
||||
lambda q: q.survey_id
|
||||
and (q.survey_id.session_speed_rating != q.is_time_limited
|
||||
or q.is_time_limited and q.survey_id.session_speed_rating_time_limit != q.time_limit)
|
||||
).is_time_customized = True
|
||||
return questions
|
||||
|
||||
@api.ondelete(at_uninstall=False)
|
||||
def _unlink_except_live_sessions_in_progress(self):
|
||||
running_surveys = self.survey_id.filtered(lambda survey: survey.session_state == 'in_progress')
|
||||
|
|
@ -331,24 +449,26 @@ class SurveyQuestion(models.Model):
|
|||
|
||||
def validate_question(self, answer, comment=None):
|
||||
""" Validate question, depending on question type and parameters
|
||||
for simple choice, text, date and number, answer is simply the answer of the question.
|
||||
For other multiple choices questions, answer is a list of answers (the selected choices
|
||||
or a list of selected answers per question -for matrix type-):
|
||||
- Simple answer : answer = 'example' or 2 or question_answer_id or 2019/10/10
|
||||
- Multiple choice : answer = [question_answer_id1, question_answer_id2, question_answer_id3]
|
||||
- Matrix: answer = { 'rowId1' : [colId1, colId2,...], 'rowId2' : [colId1, colId3, ...] }
|
||||
for simple choice, text, date and number, answer is simply the answer of the question.
|
||||
For other multiple choices questions, answer is a list of answers (the selected choices
|
||||
or a list of selected answers per question -for matrix type-):
|
||||
|
||||
return dict {question.id (int): error (str)} -> empty dict if no validation error.
|
||||
"""
|
||||
- Simple answer : ``answer = 'example'`` or ``2`` or ``question_answer_id`` or ``2019/10/10``
|
||||
- Multiple choice : ``answer = [question_answer_id1, question_answer_id2, question_answer_id3]``
|
||||
- Matrix: ``answer = { 'rowId1' : [colId1, colId2,...], 'rowId2' : [colId1, colId3, ...] }``
|
||||
|
||||
:returns: A dict ``{question.id: error}``, or an empty dict if no validation error.
|
||||
:rtype: dict[int, str]
|
||||
"""
|
||||
self.ensure_one()
|
||||
if isinstance(answer, str):
|
||||
answer = answer.strip()
|
||||
# Empty answer to mandatory question
|
||||
if self.constr_mandatory and not answer and self.question_type not in ['simple_choice', 'multiple_choice']:
|
||||
return {self.id: self.constr_error_msg or _('This question requires an answer.')}
|
||||
|
||||
# because in choices question types, comment can count as answer
|
||||
if answer or self.question_type in ['simple_choice', 'multiple_choice']:
|
||||
if not answer and self.question_type not in ['simple_choice', 'multiple_choice']:
|
||||
if self.constr_mandatory and not self.survey_id.users_can_go_back:
|
||||
return {self.id: self.constr_error_msg or _('This question requires an answer.')}
|
||||
else:
|
||||
if self.question_type == 'char_box':
|
||||
return self._validate_char_box(answer)
|
||||
elif self.question_type == 'numerical_box':
|
||||
|
|
@ -359,6 +479,8 @@ class SurveyQuestion(models.Model):
|
|||
return self._validate_choice(answer, comment)
|
||||
elif self.question_type == 'matrix':
|
||||
return self._validate_matrix(answer)
|
||||
elif self.question_type == 'scale':
|
||||
return self._validate_scale(answer)
|
||||
return {}
|
||||
|
||||
def _validate_char_box(self, answer):
|
||||
|
|
@ -413,11 +535,22 @@ class SurveyQuestion(models.Model):
|
|||
return {}
|
||||
|
||||
def _validate_choice(self, answer, comment):
|
||||
# Empty comment
|
||||
if self.constr_mandatory \
|
||||
and not answer \
|
||||
and not (self.comments_allowed and self.comment_count_as_answer and comment):
|
||||
""" Validates choice-based questions.
|
||||
- Checks that mandatory questions have at least one answer.
|
||||
- For 'simple_choice', ensures that exactly one answer is provided.
|
||||
"""
|
||||
answers = answer if isinstance(answer, list) else ([answer] if answer else [])
|
||||
|
||||
valid_answers_count = len(answers)
|
||||
if comment and self.comment_count_as_answer:
|
||||
valid_answers_count += 1
|
||||
|
||||
if valid_answers_count == 0 and self.constr_mandatory and not self.survey_id.users_can_go_back:
|
||||
return {self.id: self.constr_error_msg or _('This question requires an answer.')}
|
||||
|
||||
if valid_answers_count > 1 and self.question_type == 'simple_choice':
|
||||
return {self.id: _('For this question, you can only select one answer.')}
|
||||
|
||||
return {}
|
||||
|
||||
def _validate_matrix(self, answers):
|
||||
|
|
@ -426,6 +559,13 @@ class SurveyQuestion(models.Model):
|
|||
return {self.id: self.constr_error_msg or _('This question requires an answer.')}
|
||||
return {}
|
||||
|
||||
def _validate_scale(self, answer):
|
||||
if not self.survey_id.users_can_go_back \
|
||||
and self.constr_mandatory \
|
||||
and not answer:
|
||||
return {self.id: self.constr_error_msg or _('This question requires an answer.')}
|
||||
return {}
|
||||
|
||||
def _index(self):
|
||||
"""We would normally just use the 'sequence' field of questions BUT, if the pages and questions are
|
||||
created without ever moving records around, the sequence field can be set to 0 for all the questions.
|
||||
|
|
@ -434,6 +574,36 @@ class SurveyQuestion(models.Model):
|
|||
self.ensure_one()
|
||||
return list(self.survey_id.question_and_page_ids).index(self)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# SPEED RATING
|
||||
# ------------------------------------------------------------
|
||||
|
||||
def _update_time_limit_from_survey(self, is_time_limited=None, time_limit=None):
|
||||
"""Update the speed rating values after a change in survey's speed rating configuration.
|
||||
|
||||
* Questions that were not customized will take the new default values from the survey
|
||||
* Questions that were customized will not change their values, but this method will check
|
||||
and update the `is_time_customized` flag if necessary (to `False`) such that the user
|
||||
won't need to "actively" do it to make the question sensitive to change in survey values.
|
||||
|
||||
This is not done with `_compute`s because `is_time_limited` (and `time_limit`) would depend
|
||||
on `is_time_customized` and vice versa.
|
||||
"""
|
||||
write_vals = {}
|
||||
if is_time_limited is not None:
|
||||
write_vals['is_time_limited'] = is_time_limited
|
||||
if time_limit is not None:
|
||||
write_vals['time_limit'] = time_limit
|
||||
non_time_customized_questions = self.filtered(lambda s: not s.is_time_customized)
|
||||
non_time_customized_questions.write(write_vals)
|
||||
|
||||
# Reset `is_time_customized` as necessary
|
||||
customized_questions = self - non_time_customized_questions
|
||||
back_to_default_questions = customized_questions.filtered(
|
||||
lambda q: q.is_time_limited == q.survey_id.session_speed_rating
|
||||
and (q.is_time_limited is False or q.time_limit == q.survey_id.session_speed_rating_time_limit))
|
||||
back_to_default_questions.is_time_customized = False
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# STATISTICS / REPORTING
|
||||
# ------------------------------------------------------------
|
||||
|
|
@ -466,7 +636,7 @@ class SurveyQuestion(models.Model):
|
|||
answer_line_ids=answer_lines,
|
||||
answer_line_done_ids=done_lines,
|
||||
answer_input_done_ids=done_lines.mapped('user_input_id'),
|
||||
answer_input_skipped_ids=skipped_lines.mapped('user_input_id'),
|
||||
answer_input_ids=answer_lines.mapped('user_input_id'),
|
||||
comment_line_ids=comment_line_ids)
|
||||
question_data.update(question._get_stats_summary_data(answer_lines))
|
||||
|
||||
|
|
@ -474,7 +644,12 @@ class SurveyQuestion(models.Model):
|
|||
table_data, graph_data = question._get_stats_data(answer_lines)
|
||||
question_data['table_data'] = table_data
|
||||
question_data['graph_data'] = json.dumps(graph_data)
|
||||
|
||||
if question.question_type in ["text_box", "char_box", "numerical_box", "date", "datetime"]:
|
||||
answers_data = [
|
||||
[input_line.id, input_line._get_answer_value(), input_line.user_input_id.get_print_url()]
|
||||
for input_line in table_data if not input_line.skipped
|
||||
]
|
||||
question_data["answers_data"] = json.dumps(answers_data, default=str)
|
||||
all_questions_data.append(question_data)
|
||||
return all_questions_data
|
||||
|
||||
|
|
@ -486,6 +661,9 @@ class SurveyQuestion(models.Model):
|
|||
return table_data, [{'key': self.title, 'values': graph_data}]
|
||||
elif self.question_type == 'matrix':
|
||||
return self._get_stats_graph_data_matrix(user_input_lines)
|
||||
elif self.question_type == 'scale':
|
||||
table_data, graph_data = self._get_stats_data_scale(user_input_lines)
|
||||
return table_data, [{'key': self.title, 'values': graph_data}]
|
||||
return [line for line in user_input_lines], []
|
||||
|
||||
def _get_stats_data_answers(self, user_input_lines):
|
||||
|
|
@ -504,17 +682,17 @@ class SurveyQuestion(models.Model):
|
|||
count_data[line.suggested_answer_id] += 1
|
||||
|
||||
table_data = [{
|
||||
'value': _('Other (see comments)') if not sug_answer else sug_answer.value,
|
||||
'suggested_answer': sug_answer,
|
||||
'count': count_data[sug_answer],
|
||||
'count_text': _("%s Votes", count_data[sug_answer]),
|
||||
'value': _('Other (see comments)') if not suggested_answer else suggested_answer.value_label,
|
||||
'suggested_answer': suggested_answer,
|
||||
'count': count_data[suggested_answer],
|
||||
'count_text': self.env._("%s Votes", count_data[suggested_answer]),
|
||||
}
|
||||
for sug_answer in suggested_answers]
|
||||
for suggested_answer in suggested_answers]
|
||||
graph_data = [{
|
||||
'text': _('Other (see comments)') if not sug_answer else sug_answer.value,
|
||||
'count': count_data[sug_answer]
|
||||
'text': self.env._('Other (see comments)') if not suggested_answer else suggested_answer.value_label,
|
||||
'count': count_data[suggested_answer]
|
||||
}
|
||||
for sug_answer in suggested_answers]
|
||||
for suggested_answer in suggested_answers]
|
||||
|
||||
return table_data, graph_data
|
||||
|
||||
|
|
@ -530,19 +708,42 @@ class SurveyQuestion(models.Model):
|
|||
table_data = [{
|
||||
'row': row,
|
||||
'columns': [{
|
||||
'suggested_answer': sug_answer,
|
||||
'count': count_data[(row, sug_answer)]
|
||||
} for sug_answer in suggested_answers],
|
||||
'suggested_answer': suggested_answer,
|
||||
'count': count_data[(row, suggested_answer)]
|
||||
} for suggested_answer in suggested_answers],
|
||||
} for row in matrix_rows]
|
||||
graph_data = [{
|
||||
'key': sug_answer.value,
|
||||
'key': suggested_answer.value,
|
||||
'values': [{
|
||||
'text': row.value,
|
||||
'count': count_data[(row, sug_answer)]
|
||||
'count': count_data[(row, suggested_answer)]
|
||||
}
|
||||
for row in matrix_rows
|
||||
]
|
||||
} for sug_answer in suggested_answers]
|
||||
} for suggested_answer in suggested_answers]
|
||||
|
||||
return table_data, graph_data
|
||||
|
||||
def _get_stats_data_scale(self, user_input_lines):
|
||||
suggested_answers = range(self.scale_min, self.scale_max + 1)
|
||||
# Scale doesn't support comment as answer, so no extra value added
|
||||
|
||||
count_data = dict.fromkeys(suggested_answers, 0)
|
||||
for line in user_input_lines:
|
||||
if not line.skipped and line.value_scale in count_data:
|
||||
count_data[line.value_scale] += 1
|
||||
|
||||
table_data = []
|
||||
graph_data = []
|
||||
for sug_answer in suggested_answers:
|
||||
table_data.append({'value': str(sug_answer),
|
||||
'suggested_answer': self.env['survey.question.answer'],
|
||||
'count': count_data[sug_answer],
|
||||
'count_text': _("%s Votes", count_data[sug_answer]),
|
||||
})
|
||||
graph_data.append({'text': str(sug_answer),
|
||||
'count': count_data[sug_answer]
|
||||
})
|
||||
|
||||
return table_data, graph_data
|
||||
|
||||
|
|
@ -552,8 +753,10 @@ class SurveyQuestion(models.Model):
|
|||
stats.update(self._get_stats_summary_data_choice(user_input_lines))
|
||||
elif self.question_type == 'numerical_box':
|
||||
stats.update(self._get_stats_summary_data_numerical(user_input_lines))
|
||||
elif self.question_type == 'scale':
|
||||
stats.update(self._get_stats_summary_data_numerical(user_input_lines, 'value_scale'))
|
||||
|
||||
if self.question_type in ['numerical_box', 'date', 'datetime']:
|
||||
if self.question_type in ['numerical_box', 'date', 'datetime', 'scale']:
|
||||
stats.update(self._get_stats_summary_data_scored(user_input_lines))
|
||||
return stats
|
||||
|
||||
|
|
@ -575,8 +778,8 @@ class SurveyQuestion(models.Model):
|
|||
'partial_inputs_count': len(partial_inputs),
|
||||
}
|
||||
|
||||
def _get_stats_summary_data_numerical(self, user_input_lines):
|
||||
all_values = user_input_lines.filtered(lambda line: not line.skipped).mapped('value_numerical_box')
|
||||
def _get_stats_summary_data_numerical(self, user_input_lines, fname='value_numerical_box'):
|
||||
all_values = user_input_lines.filtered(lambda line: not line.skipped).mapped(fname)
|
||||
lines_sum = sum(all_values)
|
||||
return {
|
||||
'numerical_max': max(all_values, default=0),
|
||||
|
|
@ -592,6 +795,42 @@ class SurveyQuestion(models.Model):
|
|||
'right_inputs_count': len(user_input_lines.filtered(lambda line: line.answer_is_correct).mapped('user_input_id'))
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# OTHERS
|
||||
# ------------------------------------------------------------
|
||||
|
||||
def _get_correct_answers(self):
    """ Return a dictionary linking the scorable question ids to their correct answers.
    The questions without correct answers are not considered.
    """
    answers_by_question = {}

    # Simple and multiple choice: one batched search for all correct suggested answers.
    choice_questions = self.filtered(
        lambda question: question.question_type in ['simple_choice', 'multiple_choice'])
    if choice_questions:
        correct_rows = self.env['survey.question.answer'].search_read(
            [('question_id', 'in', choice_questions.ids), ('is_correct', '=', True)],
            ['question_id', 'id'],
            load='',  # prevent computing display_names
        )
        for row in correct_rows:
            if row.get('id'):
                answers_by_question.setdefault(row['question_id'], []).append(row['id'])

    # Numerical box, date, datetime: the expected answer is stored on the question itself.
    for question in self - choice_questions:
        if question.question_type not in ['numerical_box', 'date', 'datetime']:
            continue
        expected = question[f'answer_{question.question_type}']
        # Dates are compared in their formatted form, datetimes in UTC.
        if question.question_type == 'date':
            expected = tools.format_date(self.env, expected)
        elif question.question_type == 'datetime':
            expected = tools.format_datetime(self.env, expected, tz='UTC', dt_format=False)
        answers_by_question[question.id] = expected

    return answers_by_question
|
||||
|
||||
|
||||
class SurveyQuestionAnswer(models.Model):
|
||||
""" A preconfigured answer for a question. This model stores values used
|
||||
|
|
@ -604,25 +843,79 @@ class SurveyQuestionAnswer(models.Model):
|
|||
"""
|
||||
_name = 'survey.question.answer'
|
||||
_rec_name = 'value'
|
||||
_order = 'sequence, id'
|
||||
_rec_names_search = ['question_id.title', 'value']
|
||||
_order = 'question_id, sequence, id'
|
||||
_description = 'Survey Label'
|
||||
|
||||
MAX_ANSWER_NAME_LENGTH = 90 # empirically tested in client dropdown
|
||||
|
||||
# question and question related fields
|
||||
question_id = fields.Many2one('survey.question', string='Question', ondelete='cascade')
|
||||
matrix_question_id = fields.Many2one('survey.question', string='Question (as matrix row)', ondelete='cascade')
|
||||
question_id = fields.Many2one('survey.question', string='Question', ondelete='cascade', index='btree_not_null')
|
||||
matrix_question_id = fields.Many2one('survey.question', string='Question (as matrix row)', ondelete='cascade', index='btree_not_null')
|
||||
question_type = fields.Selection(related='question_id.question_type')
|
||||
sequence = fields.Integer('Label Sequence order', default=10)
|
||||
scoring_type = fields.Selection(related='question_id.scoring_type')
|
||||
# answer related fields
|
||||
value = fields.Char('Suggested value', translate=True, required=True)
|
||||
value = fields.Char('Suggested value', translate=True)
|
||||
value_image = fields.Image('Image', max_width=1024, max_height=1024)
|
||||
value_image_filename = fields.Char('Image Filename')
|
||||
value_label = fields.Char('Value Label', compute='_compute_value_label',
|
||||
help="Answer label as either the value itself if not empty "
|
||||
"or a letter representing the index of the answer otherwise.")
|
||||
is_correct = fields.Boolean('Correct')
|
||||
answer_score = fields.Float('Score', help="A positive score indicates a correct choice; a negative or null score indicates a wrong answer")
|
||||
|
||||
_value_not_empty = models.Constraint(
|
||||
'CHECK (value IS NOT NULL OR value_image_filename IS NOT NULL)',
|
||||
'Suggested answer value must not be empty (a text and/or an image must be provided).',
|
||||
)
|
||||
|
||||
@api.depends('value_label', 'question_id.question_type', 'question_id.title', 'matrix_question_id')
def _compute_display_name(self):
    """Render an answer name as "Question title : Answer label value", making sure it is not too long.

    Unless the answer is part of a matrix-type question, this implementation makes sure we have
    at least 30 characters for the question title, then we elide it, leaving the rest of the
    space for the answer.
    """
    for answer in self:
        label = answer.value_label
        question = answer.question_id
        if not question or question.question_type == 'matrix':
            # Matrix rows (and answers without a question) show their label only.
            answer.display_name = label
            continue
        title = question.title or _("[Question Title]")
        # 3 extra characters account for the `" : "` separator.
        overflow = len(title) + len(label) + 3 - self.MAX_ANSWER_NAME_LENGTH
        if overflow <= 0:
            answer.display_name = f'{title} : {label}'
            continue
        # Elide the title first, keeping at least 30 characters of it,
        # then clamp the combined name to the maximum length.
        elided_title = shorten(title, max(30, len(title) - overflow), placeholder="...")
        answer.display_name = shorten(
            f'{elided_title} : {label}',
            self.MAX_ANSWER_NAME_LENGTH,
            placeholder="..."
        )
|
||||
|
||||
@api.depends('question_id.suggested_answer_ids', 'sequence', 'value')
def _compute_value_label(self):
    """ Compute the label as the value if not empty or a letter representing the index of the answer otherwise. """
    for answer in self:
        # using image -> use a letter to represent the value
        if not answer.value and answer.question_id and answer.id:
            answer_idx = answer.question_id.suggested_answer_ids.ids.index(answer.id)
            # Only 26 letters exist (A-Z, indices 0-25). The previous bound
            # `< 27` was off by one: index 26 produced chr(91) == '[',
            # a non-letter label.
            answer.value_label = chr(65 + answer_idx) if answer_idx < 26 else ''
        else:
            answer.value_label = answer.value or ''
|
||||
|
||||
@api.constrains('question_id', 'matrix_question_id')
def _check_question_not_empty(self):
    """Ensure that field question_id XOR field matrix_question_id is not null"""
    for label in self:
        # Exactly one of the two parent links must be set: both-set or
        # both-empty are invalid.
        if bool(label.question_id) == bool(label.matrix_question_id):
            raise ValidationError(_("A label must be attached to only one question."))
|
||||
|
||||
def _get_answer_matching_domain(self, row_id=False):
    """Return a domain matching the user input lines that picked this answer.

    :param row_id: id of the matrix row, only relevant for matrix questions
    :return: a prefix-notation domain list, empty for unsupported question types
    """
    self.ensure_one()
    question_leaf = ('question_id', '=', self.question_id.id)
    answer_leaf = ('suggested_answer_id', '=', self.id)
    if self.question_type == "matrix":
        return ['&', '&', question_leaf, ('matrix_row_id', '=', row_id), answer_leaf]
    if self.question_type in ('multiple_choice', 'simple_choice'):
        return ['&', question_leaf, answer_leaf]
    return []
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue