Fix oppia#1366: Display top unresolved answers and new feedback for dashboard explorations in a dropdown (oppia#2296)

* Fetch top 3 unresolved answers per exp

* Address review comments

* Return state with each answer, modify tests

* Fetch top new feedback for explorations

* Fix tests, pass limit as argument

* Remove print statements, add tests for limiting

* Remove old code added while merging

* Handle empty list passed to IN error

* Skeletal code for dropdown

* Dropdown appear on click, improve styling

* Fetch latest updates for explorations

* Add test for new updates check

* Desktop frontend changes, minor fix in get_multi

* Code fixes

* Various fixes after manual testing

* CSS fixes, partially working on mobile

* CSS alphabetical ordering, caret up in mobile view

* Finer use of event stopPropagation

* Fetch and send feedback messages, not threads to payload

* Fix last updates function and tests

* Fixes to feedback thread fetching and display

* Minor fixes

* Fix failing feedback_services_tests

* Address initial review comments

* Fix datetime is not json serializable error

* Remove unnecessary checks for timestamps

* Fix frontend tests sample serializer, underline on tabs text hover

* Address review comments

* Minor fixes to sorting after manual testing

* Address review comments

* Fix long pending backend tests failure

* Fix layout issues in dashboard

* Leave off recent updates for now

* Fix failing backend test

* Create separate route on backend for new feedback

* Cache and display data on frontend request

* Fix failing base backend test

* Add unresolved answers, feedback counts to initial payload

* Fix url and write frontend tests

* Remove constant for updates, minor code improvements

* Fix failing backend tests

* Fix empty text for anonymous feedback

* Add message to indicate fetching data

* Partially address review comments

* Address backend review comments

* Address remaining review comments

* Return promise directly from service

* Minor improvements in code

* Address review comments

* Partially address review comments

* Address review comments

* Update branch and add link to feedback in dropdown

* Fix failing frontend and linting tests

* Revert handling request timeout for dashboard requests

* Remove warning message const from base.js

* Revert default query limit

* Address backend review comments

* Display total counts in dropdown

* Fix count of feedback and unresolved answers in dropdown

* Move directive to inside dashboard directory

* Fix alphabetic ordering of imports
526avijitgupta authored Oct 2, 2016
1 parent 1e87212 commit 7018b3d
Showing 18 changed files with 729 additions and 251 deletions.
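For orientation, the sketch below (plain Python, illustrative data only) shows roughly the payload shape the new dropdown consumes, using only field names visible in this diff: per-exploration num_unresolved_answers and top_unresolved_answers, plus total_open_feedback and total_unresolved_answers under dashboard_stats. The top-level key 'explorations_list' is a hypothetical placeholder, not taken from this diff, and other real payload fields are omitted.

# Illustrative only: approximate shape of the dashboard payload read by the
# new dropdown. Values are made up; only the nested field names come from
# the diff below.
example_dashboard_payload = {
    'explorations_list': [{           # hypothetical top-level key
        'id': 'exp_id_1',
        'num_unresolved_answers': 7,
        'top_unresolved_answers': [
            {'count': 4, 'value': 'answer_1', 'state': 'Introduction'},
            {'count': 2, 'value': 'answer_2', 'state': 'Introduction'},
            {'count': 1, 'value': 'answer_3', 'state': 'End'},
        ],
    }],
    'dashboard_stats': {
        'total_open_feedback': 3,
        'total_unresolved_answers': 7,
    },
}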
2 changes: 1 addition & 1 deletion core/controllers/base_test.py
@@ -73,7 +73,7 @@ def test_that_no_get_results_in_500_error(self):
response = self.testapp.get(url, expect_errors=True)
self.log_line(
'Fetched %s with status code %s' % (url, response.status_int))
self.assertIn(response.status_int, [200, 302, 404])
self.assertIn(response.status_int, [200, 302, 401, 404])

# TODO(sll): Add similar tests for POST, PUT, DELETE.
# TODO(sll): Set a self.payload attr in the BaseHandler for
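The extra 401 in the allowed status codes reflects GET routes that now require a fully signed-up user (see the @base.require_fully_signed_up decorator on the new handler in dashboard.py below). As a rough, hypothetical sketch of that kind of guard, not Oppia's actual decorator, the idea is:

import functools

def require_fully_signed_up(handler_method):
    # Hypothetical sketch: reject anonymous users with a 401 instead of
    # letting a creator-only handler fail with a 500.
    @functools.wraps(handler_method)
    def wrapper(self, *args, **kwargs):
        if getattr(self, 'user_id', None) is None:
            self.response.set_status(401)   # assumed webapp2-style response
            return
        return handler_method(self, *args, **kwargs)
    return wrapper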
29 changes: 26 additions & 3 deletions core/controllers/dashboard.py
@@ -175,17 +175,25 @@ def _round_average_ratings(rating):
exploration_ids_subscribed_to))

unresolved_answers_dict = (
stats_services.get_exps_unresolved_answers_count_for_default_rule(
stats_services.get_exps_unresolved_answers_for_default_rule(
exploration_ids_subscribed_to))

total_unresolved_answers = 0

for ind, exploration in enumerate(exp_summary_list):
exploration.update(feedback_thread_analytics[ind].to_dict())
exploration.update({
'num_unresolved_answers': (
unresolved_answers_dict[exploration['id']]
unresolved_answers_dict[exploration['id']]['count']
if exploration['id'] in unresolved_answers_dict else 0
),
'top_unresolved_answers': (
unresolved_answers_dict[exploration['id']]
['unresolved_answers']
[:feconf.TOP_UNRESOLVED_ANSWERS_COUNT_DASHBOARD]
)
})
total_unresolved_answers += exploration['num_unresolved_answers']

exp_summary_list = sorted(
exp_summary_list,
@@ -222,7 +230,8 @@ def _round_average_ratings(rating):
self.user_id))
dashboard_stats.update({
'total_open_feedback': feedback_services.get_total_open_threads(
feedback_thread_analytics)
feedback_thread_analytics),
'total_unresolved_answers': total_unresolved_answers
})
if dashboard_stats and dashboard_stats.get('average_ratings'):
dashboard_stats['average_ratings'] = (
@@ -268,6 +277,20 @@ def get(self):
})


class ExplorationDashboardStatsHandler(base.BaseHandler):
"""Returns the most recent open feedback for an exploration."""

@base.require_fully_signed_up
def get(self, exploration_id):
"""Handles GET requests."""
self.render_json({
'open_feedback': [
feedback_message.to_dict()
for feedback_message in
feedback_services.get_most_recent_messages(exploration_id)]
})


class NewExploration(base.BaseHandler):
"""Creates a new exploration."""

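To make the per-exploration bookkeeping above concrete, here is a small self-contained sketch (sample data only, not the real handler) of how num_unresolved_answers, top_unresolved_answers and the running total fall out of the dict returned by get_exps_unresolved_answers_for_default_rule. The constant value is an assumption for illustration.

TOP_UNRESOLVED_ANSWERS_COUNT_DASHBOARD = 3  # assumed value of the feconf constant

unresolved_answers_dict = {
    'exp_id_1': {
        'count': 7,
        'unresolved_answers': [
            {'count': 4, 'value': 'answer_1', 'state': 'Introduction'},
            {'count': 2, 'value': 'answer_2', 'state': 'Introduction'},
            {'count': 1, 'value': 'answer_3', 'state': 'End'},
        ],
    },
}

exp_summary_list = [{'id': 'exp_id_1'}, {'id': 'exp_id_2'}]

total_unresolved_answers = 0
for exploration in exp_summary_list:
    data = unresolved_answers_dict.get(exploration['id'])
    exploration['num_unresolved_answers'] = data['count'] if data else 0
    exploration['top_unresolved_answers'] = (
        data['unresolved_answers'][:TOP_UNRESOLVED_ANSWERS_COUNT_DASHBOARD]
        if data else [])
    total_unresolved_answers += exploration['num_unresolved_answers']

print(total_unresolved_answers)  # -> 7; exp_id_2 has no entry and contributes 0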
26 changes: 26 additions & 0 deletions core/domain/feedback_services.py
@@ -366,6 +366,31 @@ def _get_thread_from_model(thread_model):
thread_model.created_on, thread_model.last_updated)


def get_most_recent_messages(exp_id):
"""Fetch the most recently updated feedback threads for a given exploration,
and then get the latest feedback message out of each thread.
Args:
exp_id: str.
Returns:
A list of FeedbackMessage.
"""
thread_models = (
feedback_models.FeedbackThreadModel.get_threads(
exp_id, limit=feconf.OPEN_FEEDBACK_COUNT_DASHBOARD))

message_models = []
for thread_model in thread_models:
message_models.append(
feedback_models.FeedbackMessageModel.get_most_recent_message(
exp_id, thread_model.thread_id))

return [
_get_message_from_model(message_model)
for message_model in message_models]


def get_threads(exploration_id):
"""Fetches all the threads for the given exploration id.
@@ -416,6 +441,7 @@ def get_open_threads(exploration_id, has_suggestion):
open_threads.append(thread)
return open_threads


def get_closed_threads(exploration_id, has_suggestion):
"""Fetches all closed threads of the given exploration id.
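The "latest message per thread" pattern used by get_most_recent_messages can be illustrated without the datastore models. The sketch below assumes plain dicts with a created_on timestamp instead of FeedbackMessageModel, and its limit default is arbitrary rather than the real feconf.OPEN_FEEDBACK_COUNT_DASHBOARD value.

import datetime

# Stand-in data: each thread is just a list of message dicts.
threads = {
    'thread_1': [
        {'text': 'First question', 'created_on': datetime.datetime(2016, 9, 1)},
        {'text': 'Follow-up', 'created_on': datetime.datetime(2016, 9, 5)},
    ],
    'thread_2': [
        {'text': 'Typo report', 'created_on': datetime.datetime(2016, 9, 3)},
    ],
}

def get_most_recent_messages_sketch(threads_dict, limit=20):
    # Take the newest message from each thread, then keep the threads whose
    # latest message is most recent (roughly what the real function does via
    # get_threads(..., limit=...) plus get_most_recent_message).
    latest = [max(messages, key=lambda m: m['created_on'])
              for messages in threads_dict.values()]
    latest.sort(key=lambda m: m['created_on'], reverse=True)
    return latest[:limit]

print([m['text'] for m in get_most_recent_messages_sketch(threads)])
# -> ['Follow-up', 'Typo report']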
12 changes: 12 additions & 0 deletions core/domain/feedback_services_test.py
@@ -246,6 +246,18 @@ def _run_computation(self):
0)
self.process_and_flush_pending_tasks()

def test_get_threads_single_exploration(self):
threads = feedback_services.get_threads(self.EXP_ID_1)
self.assertEqual(len(threads), 0)
feedback_services.create_thread(
self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], None,
self.EXPECTED_THREAD_DICT['subject'], 'not used here')

threads = feedback_services.get_threads(self.EXP_ID_1)
self.assertEqual(1, len(threads))
self.assertDictContainsSubset(self.EXPECTED_THREAD_DICT,
threads[0].to_dict())

def test_get_all_threads(self):
# Create an anonymous feedback thread
feedback_services.create_thread(
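The assertDictContainsSubset assertion in the new test checks that every key/value pair of the expected dict is present in the actual dict (it is deprecated in Python 3.2+, but fine under the Python 2 runtime used here). A plain-Python equivalent, with hypothetical values, is:

# Equivalent of assertDictContainsSubset(expected, actual) in plain Python.
expected = {'state_name': 'Welcome!', 'subject': 'a subject'}     # hypothetical values
actual = {'state_name': 'Welcome!', 'subject': 'a subject',
          'status': 'open', 'summary': None}                       # hypothetical values

assert all(actual.get(key) == value for key, value in expected.items())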
97 changes: 66 additions & 31 deletions core/domain/stats_services.py
@@ -40,46 +40,81 @@ def get_top_unresolved_answers_for_default_rule(exploration_id, state_name):
}


def get_exps_unresolved_answers_count_for_default_rule(exp_ids):
"""Gets answer counts per exploration for the answer groups for default
rule across all states for explorations with ids in exp_ids.
def get_exps_unresolved_answers_for_default_rule(exp_ids):
"""Gets unresolved answers per exploration for default rule across all
states for explorations with ids in exp_ids. The value of the total count should
match the sum of the individual counts of the unresolved answers.
Note that this method currently returns the counts only for the DEFAULT
rule. This should ideally handle all types of unresolved answers.
TODO(526avijitgupta): Note that this method currently returns the data only
for the DEFAULT rule. This should ideally handle all types of unresolved
answers.
Returns:
A dict, keyed by the string '{exp_id}', whose values are the number of
unresolved answers that exploration has. Any exp_ids for explorations
that don't exist or that have been deleted will be ignored, and not
included in the return value.
Returns a dict of the following format:
{
'exp_id_1': {
'count': 7 (number of unresolved answers for this exploration),
'unresolved_answers': (list of unresolved answers sorted by count)
[
{'count': 4, 'value': 'answer_1', 'state': 'Introduction'},
{'count': 2, 'value': 'answer_2', 'state': 'Introduction'},
{'count': 1, 'value': 'answer_3', 'state': 'End'}
]
},
'exp_id_2': {
'count': 13,
'unresolved_answers':
[
{'count': 8, 'value': 'answer_5', 'state': 'Introduction'},
{'count': 3, 'value': 'answer_4', 'state': 'Quest'},
{'count': 1, 'value': 'answer_6', 'state': 'End'},
{'count': 1, 'value': 'answer_8', 'state': 'End'}
]
}
}
"""
explorations = exp_services.get_multiple_explorations_by_id(
exp_ids, strict=False)

# The variable `exploration_states_tuples` is a list of all
# (exp_id, state_name) tuples for the given exp_ids.
# E.g. - [
# ('eid1', 'Introduction'),
# ('eid1', 'End'),
# ('eid2', 'Introduction'),
# ('eid3', 'Introduction')
# ]
# when exp_ids = ['eid1', 'eid2', 'eid3'].
explorations_states_tuples = [
(exp_domain_object.id, state_key)
for exp_domain_object in explorations.values()
for state_key in exp_domain_object.states
]
def _get_explorations_states_tuples_by_ids(exp_ids):
"""Returns a list of all (exp_id, state_name) tuples for the given
exp_ids.
E.g. - [
('eid1', 'Introduction'),
('eid1', 'End'),
('eid2', 'Introduction'),
('eid3', 'Introduction')
]
when exp_ids = ['eid1', 'eid2', 'eid3'].
"""
explorations = (
exp_services.get_multiple_explorations_by_id(exp_ids, strict=False))
return [
(exploration.id, state_name)
for exploration in explorations.values()
for state_name in exploration.states
]

explorations_states_tuples = _get_explorations_states_tuples_by_ids(exp_ids)
exploration_states_answers_list = get_top_state_rule_answers_multi(
explorations_states_tuples, [exp_domain.DEFAULT_RULESPEC_STR])
exps_answers_mapping = {}

for ind, statewise_answers in enumerate(exploration_states_answers_list):
exp_id = explorations_states_tuples[ind][0]
if exp_id not in exps_answers_mapping:
exps_answers_mapping[exp_id] = {
'count': 0,
'unresolved_answers': []
}
for answer in statewise_answers:
exp_id = explorations_states_tuples[ind][0]
if exp_id not in exps_answers_mapping:
exps_answers_mapping[exp_id] = 0
exps_answers_mapping[exp_id] += answer['count']
exps_answers_mapping[exp_id]['count'] += answer['count']
answer['state'] = explorations_states_tuples[ind][1]

exps_answers_mapping[exp_id]['unresolved_answers'].extend(
statewise_answers)

for exp_id in exps_answers_mapping:
exps_answers_mapping[exp_id]['unresolved_answers'] = (sorted(
exps_answers_mapping[exp_id]['unresolved_answers'],
key=lambda a: a['count'],
reverse=True))

return exps_answers_mapping
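As a quick sanity check of the new return format, the following self-contained sketch feeds the aggregation and sorting steps above with made-up per-state answer data and prints a dict in the documented shape. All names and values are illustrative, not from real explorations.

# Stand-ins for the (exp_id, state_name) tuples and for the per-state answer
# lists that get_top_state_rule_answers_multi would return, in the same order.
explorations_states_tuples = [
    ('exp_id_1', 'Introduction'), ('exp_id_1', 'End')]
exploration_states_answers_list = [
    [{'count': 4, 'value': 'answer_1'}, {'count': 2, 'value': 'answer_2'}],
    [{'count': 1, 'value': 'answer_3'}],
]

exps_answers_mapping = {}
for ind, statewise_answers in enumerate(exploration_states_answers_list):
    exp_id, state_name = explorations_states_tuples[ind]
    mapping = exps_answers_mapping.setdefault(
        exp_id, {'count': 0, 'unresolved_answers': []})
    for answer in statewise_answers:
        mapping['count'] += answer['count']
        answer['state'] = state_name
    mapping['unresolved_answers'].extend(statewise_answers)

for data in exps_answers_mapping.values():
    data['unresolved_answers'].sort(key=lambda a: a['count'], reverse=True)

print(exps_answers_mapping)
# Roughly: {'exp_id_1': {'count': 7, 'unresolved_answers': [
#     {'count': 4, 'value': 'answer_1', 'state': 'Introduction'},
#     {'count': 2, 'value': 'answer_2', 'state': 'Introduction'},
#     {'count': 1, 'value': 'answer_3', 'state': 'End'}]}}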
