Fix part of oppia#1366: Implement realtime layer for UserStats continuous job (oppia#2238)

* Basic implementation; yet to be completed

* Partial implementation

* Branching in map based on different models

* Revert branching in map; Update UserStats model

* Realtime layer and model changes

* Add num_ratings in stats; resolve TODO for self

* Minor fixes in code

* Remove session id from rate exploration event model

* Minor fixes

* Fix unused argument

* Updates to realtime layer backend

* Display num ratings in view

* Fix backend tests

* Check num_ratings increase for same rating twice

* Use user_id as entity key as well

* Add tests using events for realtime layer

* Add tests for realtime layer

* Handle case when user rating already exists

* Minor changes to RateExplorationEventLogEntryModel

* Partially address review comments

* Address review comments

* Fix commented out backend test
526avijitgupta authored and wxyxinyu committed Aug 8, 2016
1 parent 9f34971 commit 6f84fcd
Showing 19 changed files with 525 additions and 101 deletions.
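
Most of the realtime-layer logic itself lives in core/domain/user_jobs_continuous.py, which is among the 19 changed files but is not reproduced in this excerpt. For orientation, a continuous-computation job in Oppia generally pairs a realtime datastore class with an aggregator that listens for events. The sketch below is a hypothetical illustration of that shape for UserStatsAggregator; apart from the UserStatsAggregator name itself, the base-class names, field names and method bodies are assumptions, not code from this commit.

# Hypothetical sketch of the realtime layer for the UserStats continuous
# job. Base-class names and signatures follow the general pattern of
# Oppia's continuous-computation framework and should be treated as
# assumptions; see core/domain/user_jobs_continuous.py for the real code.
from core import jobs
from google.appengine.ext import ndb

import feconf


class UserStatsRealtimeModel(
        jobs.BaseRealtimeDatastoreClassForContinuousComputations):
    # Per the commit message, the realtime entity is keyed by user_id.
    total_plays = ndb.IntegerProperty(default=0)
    num_ratings = ndb.IntegerProperty(default=0)
    average_ratings = ndb.FloatProperty()


class UserStatsAggregator(jobs.BaseContinuousComputationManager):
    """Aggregates dashboard stats from the batch MapReduce output plus
    any events that arrived since the last batch run."""

    @classmethod
    def get_event_types_listened_to(cls):
        return [
            feconf.EVENT_TYPE_START_EXPLORATION,
            feconf.EVENT_TYPE_RATE_EXPLORATION]

    @classmethod
    def _handle_incoming_event(
            cls, active_realtime_layer, event_type, *args):
        # A start event bumps total_plays; a rating event bumps
        # num_ratings (only for a first-time rating) and updates the
        # running average, using old_rating to avoid double counting.
        pass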
5 changes: 3 additions & 2 deletions core/controllers/dashboard.py
@@ -213,8 +213,9 @@ def _get_intro_card_color(category):
collection_summary.category),
})

dashboard_stats = user_services.get_user_dashboard_stats(
self.user_id)
dashboard_stats = (
user_jobs_continuous.UserStatsAggregator.get_dashboard_stats(
self.user_id))
dashboard_stats.update({
'total_open_feedback': feedback_services.get_total_open_threads(
feedback_thread_analytics)
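
The handler now pulls its numbers from the continuous job's aggregator instead of user_services. get_dashboard_stats is defined in user_jobs_continuous.py (not shown in this diff); judging from the keys the dashboard tests assert on below, it plausibly returns a dict along these lines. This is a sketch under that assumption, not the committed implementation.

# Hypothetical sketch of UserStatsAggregator.get_dashboard_stats(user_id),
# inferred from the fields checked in dashboard_test.py. The real method
# would also merge in the realtime layer on top of the batch
# UserStatsModel values.
def get_dashboard_stats(user_id):
    # Assumes a non-strict get so that a missing model yields defaults.
    user_model = user_models.UserStatsModel.get(user_id, strict=False)
    return {
        'total_plays': user_model.total_plays if user_model else 0,
        'num_ratings': user_model.num_ratings if user_model else 0,
        'average_ratings': user_model.average_ratings if user_model else None,
    }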
49 changes: 37 additions & 12 deletions core/controllers/dashboard_test.py
@@ -111,8 +111,8 @@ def _rate_exploration(self, exp_id, ratings):
"""Create num_ratings ratings for exploration with exp_id,
of values from ratings.
"""
# Each user id needs to be unique since each user can only give an
# exploration one rating.
# Generate unique user ids to rate an exploration. Each user id needs
# to be unique since each user can only give an exploration one rating.
user_ids = ['user%d' % i for i in range(len(ratings))]
self.process_and_flush_pending_tasks()
for ind, user_id in enumerate(user_ids):
@@ -177,6 +177,7 @@ def test_one_play_for_single_exploration(self):
self.assertEquals(user_model.total_plays, 1)
self.assertEquals(
user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model.num_ratings, 0)
self.assertIsNone(user_model.average_ratings)
self.logout()

@@ -196,6 +197,7 @@ def test_one_rating_for_single_exploration(self):
self.assertEquals(user_model.total_plays, 0)
self.assertEquals(
user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model.num_ratings, 1)
self.assertEquals(user_model.average_ratings, 4)
self.logout()

@@ -222,6 +224,7 @@ def test_one_play_and_rating_for_single_exploration(self):
self.assertEquals(user_model.total_plays, 1)
self.assertEquals(
user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model.num_ratings, 1)
self.assertEquals(user_model.average_ratings, 3)
self.logout()

@@ -250,6 +253,7 @@ def test_multiple_plays_and_ratings_for_single_exploration(self):
self.assertEquals(user_model.total_plays, 4)
self.assertEquals(
user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model.num_ratings, 3)
self.assertEquals(user_model.average_ratings, 4)
self.logout()

@@ -278,13 +282,13 @@ def test_one_play_and_rating_for_multiple_explorations(self):
self.assertEquals(user_model.total_plays, 1)
self.assertEquals(
user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model.num_ratings, 1)
self.assertEquals(user_model.average_ratings, 4)
self.logout()

def test_multiple_plays_and_ratings_for_multiple_explorations(self):
exploration_1 = self.save_new_default_exploration(
self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

exploration_2 = self.save_new_default_exploration(
self.EXP_ID_2, self.owner_id_1, title=self.EXP_TITLE_2)

@@ -302,17 +306,19 @@ def test_multiple_plays_and_ratings_for_multiple_explorations(self):
self._record_start(exp_id_1, exp_version, state_1)
self._record_start(exp_id_2, exp_version, state_2)
self._record_start(exp_id_2, exp_version, state_2)
self._run_stats_aggregator_jobs()

self._rate_exploration(exp_id_1, [4])
self._rate_exploration(exp_id_2, [3])
self._rate_exploration(exp_id_2, [3, 3])

self._run_stats_aggregator_jobs()
self._run_user_stats_aggregator_job()

user_model = user_models.UserStatsModel.get(self.owner_id_1)
self.assertEquals(user_model.total_plays, 3)
self.assertEquals(
user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model.average_ratings, 3.5)
self.assertEquals(user_model.num_ratings, 3)
self.assertEquals(user_model.average_ratings, 10/3.0)
self.logout()

def test_stats_for_single_exploration_with_multiple_owners(self):
@@ -351,13 +357,15 @@ def test_stats_for_single_exploration_with_multiple_owners(self):
self.assertEquals(user_model_1.total_plays, 2)
self.assertEquals(
user_model_1.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model_1.num_ratings, 3)
self.assertEquals(user_model_1.average_ratings, 4)

user_model_2 = user_models.UserStatsModel.get(
self.owner_id_2)
self.assertEquals(user_model_2.total_plays, 2)
self.assertEquals(
user_model_2.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model_2.num_ratings, 3)
self.assertEquals(user_model_2.average_ratings, 4)
self.logout()

@@ -389,30 +397,47 @@ def test_stats_for_multiple_explorations_with_multiple_owners(self):
self._record_start(exp_id_1, exp_version, state_1)
self._record_start(exp_id_2, exp_version, state_2)
self._record_start(exp_id_2, exp_version, state_2)
self._record_start(exp_id_2, exp_version, state_2)
self._run_stats_aggregator_jobs()

self._rate_exploration(exp_id_1, [3])
self._rate_exploration(exp_id_2, [2])
self._rate_exploration(exp_id_1, [5, 3])
self._rate_exploration(exp_id_2, [5, 5])

self._run_user_stats_aggregator_job()

expected_results = {
'total_plays': 5,
'num_ratings': 4,
'average_ratings': 18/4.0
}

user_model_2 = user_models.UserStatsModel.get(self.owner_id_2)
self.assertEquals(user_model_2.total_plays, 4)
self.assertEquals(
user_model_2.total_plays, expected_results['total_plays'])
self.assertEquals(
user_model_2.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model_2.average_ratings, 2.5)
self.assertEquals(
user_model_2.num_ratings, expected_results['num_ratings'])
self.assertEquals(
user_model_2.average_ratings, expected_results['average_ratings'])
self.logout()

self.login(self.OWNER_EMAIL_1)
response = self.get_json(feconf.DASHBOARD_DATA_URL)
self.assertEqual(len(response['explorations_list']), 2)

user_model_1 = user_models.UserStatsModel.get(self.owner_id_1)
self.assertEquals(user_model_1.total_plays, 4)
self.assertEquals(
user_model_1.total_plays, expected_results['total_plays'])
self.assertEquals(
user_model_1.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
self.assertEquals(user_model_1.average_ratings, 2.5)
self.assertEquals(
user_model_1.num_ratings, expected_results['num_ratings'])
self.assertEquals(
user_model_1.average_ratings, expected_results['average_ratings'])
self.logout()


class DashboardHandlerTest(test_utils.GenericTestBase):

COLLABORATOR_EMAIL = '[email protected]'
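
The new expected values follow directly from the ratings issued in each test: in test_multiple_plays_and_ratings_for_multiple_explorations the owner's explorations collect ratings 4, 3, 3, and in test_stats_for_multiple_explorations_with_multiple_owners they collect 5, 3, 5, 5. A quick arithmetic check of the asserted averages:

# Sanity check of the averages asserted above.
ratings_single_owner = [4, 3, 3]        # exp_1 rated [4], exp_2 rated [3, 3]
assert sum(ratings_single_owner) / float(len(ratings_single_owner)) == 10 / 3.0

ratings_multi_owner = [5, 3, 5, 5]      # exp_1 rated [5, 3], exp_2 rated [5, 5]
assert sum(ratings_multi_owner) / float(len(ratings_multi_owner)) == 18 / 4.0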
11 changes: 11 additions & 0 deletions core/domain/event_services.py
@@ -138,6 +138,17 @@ def _handle_event(
params, play_type)


class RateExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration rating events."""

EVENT_TYPE = feconf.EVENT_TYPE_RATE_EXPLORATION

@classmethod
def _handle_event(cls, exploration_id, user_id, rating, old_rating):
stats_models.RateExplorationEventLogEntryModel.create(
exploration_id, user_id, rating, old_rating)


class StateHitEventHandler(BaseEventHandler):
"""Event handler for recording state hit events."""

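
The new handler follows the same pattern as the existing BaseEventHandler subclasses: callers invoke the record(...) classmethod, and the base class defers _handle_event to the events task queue (as the taskqueue test below demonstrates for the other handlers). The rating_services.py hunk further down shows the real call site; a minimal illustrative call looks like this:

# Arguments mirror _handle_event: (exploration_id, user_id, rating,
# old_rating). old_rating is None when the user has not previously rated
# this exploration. Ids here are illustrative.
event_services.RateExplorationEventHandler.record(
    'exp_id_0', 'user_id_0', 4, None)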
4 changes: 2 additions & 2 deletions core/domain/event_services_test.py
@@ -58,8 +58,8 @@ class EventHandlerTaskQueueUnitTests(test_utils.GenericTestBase):
def test_events_go_into_the_events_queue(self):
self.assertEqual(self.count_jobs_in_taskqueue(), 0)

event_services.StartExplorationEventHandler.record(
'eid1', 1, 'sid1', 'session1', {}, feconf.PLAY_TYPE_NORMAL)
event_services.CompleteExplorationEventHandler.record(
'eid1', 1, 'sid1', 'session1', 100, {}, feconf.PLAY_TYPE_NORMAL)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.assertEqual(self.count_jobs_in_taskqueue(
queue_name=taskqueue_services.QUEUE_NAME_EVENTS), 1)
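
An analogous check for the new rating events would take the same shape as the test above; the following is a hypothetical sketch, not a test added by this commit:

# Hypothetical sketch, modelled on test_events_go_into_the_events_queue.
def test_rate_events_go_into_the_events_queue(self):
    self.assertEqual(self.count_jobs_in_taskqueue(), 0)
    event_services.RateExplorationEventHandler.record('eid1', 'uid1', 5, None)
    self.assertEqual(self.count_jobs_in_taskqueue(), 1)
    self.assertEqual(self.count_jobs_in_taskqueue(
        queue_name=taskqueue_services.QUEUE_NAME_EVENTS), 1)
    self.process_and_flush_pending_tasks()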
16 changes: 10 additions & 6 deletions core/domain/exp_services.py
@@ -1152,20 +1152,24 @@ def _should_index(exp):
return rights.status != rights_manager.ACTIVITY_STATUS_PRIVATE


def get_number_of_ratings(ratings):
return sum(ratings.values())


def get_average_rating(ratings):
"""Returns the average rating of the ratings as a float. If there are no
ratings, it will return 0.
"""
rating_weightings = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 5}
if ratings:
rating_sum = 0.0
number_of_ratings = 0.0
number_of_ratings = get_number_of_ratings(ratings)
if number_of_ratings == 0:
return 0

for rating_value, rating_count in ratings.items():
rating_sum += rating_weightings[rating_value] * rating_count
number_of_ratings += rating_count
if number_of_ratings > 0:
return rating_sum / number_of_ratings
return 0
return rating_sum / (number_of_ratings * 1.0)


def get_scaled_average_rating(ratings):
@@ -1174,7 +1178,7 @@ def get_scaled_average_rating(ratings):
95%.
"""
# The following is the number of ratings.
n = sum(ratings.values())
n = get_number_of_ratings(ratings)
if n == 0:
return 0
average_rating = get_average_rating(ratings)
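
Both helpers operate on the exploration summary's ratings dict, which maps the string keys '1' through '5' to counts. A quick worked example of the refactored functions:

# Example: one 3-star rating and two 5-star ratings.
ratings = {'1': 0, '2': 0, '3': 1, '4': 0, '5': 2}

exp_services.get_number_of_ratings(ratings)  # -> 3
exp_services.get_average_rating(ratings)     # -> (3 + 5 + 5) / 3.0 = 13 / 3.0

# With no ratings at all, get_average_rating returns 0.
exp_services.get_average_rating({'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})  # -> 0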
26 changes: 26 additions & 0 deletions core/domain/exp_services_test.py
@@ -1809,6 +1809,7 @@ class ExplorationSearchTests(ExplorationServicesUnitTests):
"""Test exploration search."""

USER_ID_1 = 'user_1'
USER_ID_2 = 'user_2'

def test_demo_explorations_are_added_to_search_index(self):
results, _ = exp_services.search_explorations('Welcome', 2)
@@ -1975,6 +1976,31 @@ def mock_search(query_string, index, cursor=None, limit=20, sort='',
self.assertEqual(cursor, expected_result_cursor)
self.assertEqual(result, doc_ids)

def test_get_number_of_ratings(self):
self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
exp = exp_services.get_exploration_summary_by_id(self.EXP_ID)

self.assertEqual(exp_services.get_number_of_ratings(exp.ratings), 0)

rating_services.assign_rating_to_exploration(
self.owner_id, self.EXP_ID, 5)
self.assertEqual(
exp_services.get_number_of_ratings(exp.ratings), 1)

rating_services.assign_rating_to_exploration(
self.USER_ID_1, self.EXP_ID, 3)
self.process_and_flush_pending_tasks()
exp = exp_services.get_exploration_summary_by_id(self.EXP_ID)
self.assertEqual(
exp_services.get_number_of_ratings(exp.ratings), 2)

rating_services.assign_rating_to_exploration(
self.USER_ID_2, self.EXP_ID, 5)
self.process_and_flush_pending_tasks()
exp = exp_services.get_exploration_summary_by_id(self.EXP_ID)
self.assertEqual(
exp_services.get_number_of_ratings(exp.ratings), 3)

def test_get_average_rating(self):
self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
exp = exp_services.get_exploration_summary_by_id(self.EXP_ID)
4 changes: 4 additions & 0 deletions core/domain/rating_services.py
@@ -18,6 +18,7 @@

import datetime

from core.domain import event_services
from core.domain import exp_services
from core.platform import models
import feconf
@@ -72,6 +73,9 @@ def _update_user_rating():
if old_rating:
exploration_summary.ratings[str(old_rating)] -= 1

event_services.RateExplorationEventHandler.record(
exploration_id, user_id, new_rating, old_rating)

exploration_summary.scaled_average_rating = (
exp_services.get_scaled_average_rating(
exploration_summary.ratings))
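
With this hook in place, every call to assign_rating_to_exploration also logs a rating event for the UserStats continuous job, and the recorded old_rating presumably lets the aggregator avoid double-counting num_ratings when a user re-rates (cf. the "Handle case when user rating already exists" commit message). A minimal call, mirroring the tests above:

# Illustrative call: a user rates exploration 'exp_id_0' with 4 stars.
# This updates the summary's ratings dict, records the
# RateExplorationEventHandler event shown in this hunk, and recomputes
# the scaled average rating.
rating_services.assign_rating_to_exploration('user_id_0', 'exp_id_0', 4)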