# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing long running jobs."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import copy
import datetime
import json
import logging
import traceback
from core.platform import models
import python_utils
import utils
from google.appengine.api import app_identity
from google.appengine.ext import ndb
from mapreduce import base_handler
from mapreduce import context
from mapreduce import input_readers
from mapreduce import mapreduce_pipeline
from mapreduce import output_writers
from mapreduce import util as mapreduce_util
from pipeline import pipeline
(base_models, job_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.job])
taskqueue_services = models.Registry.import_taskqueue_services()
transaction_services = models.Registry.import_transaction_services()
MAPPER_PARAM_KEY_ENTITY_KINDS = 'entity_kinds'
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS = 'queued_time_msecs'
# Name of an additional parameter to pass into the MR job for cleaning up
# old auxiliary job models.
MAPPER_PARAM_MAX_START_TIME_MSEC = 'max_start_time_msec'
STATUS_CODE_NEW = job_models.STATUS_CODE_NEW
STATUS_CODE_QUEUED = job_models.STATUS_CODE_QUEUED
STATUS_CODE_STARTED = job_models.STATUS_CODE_STARTED
STATUS_CODE_COMPLETED = job_models.STATUS_CODE_COMPLETED
STATUS_CODE_FAILED = job_models.STATUS_CODE_FAILED
STATUS_CODE_CANCELED = job_models.STATUS_CODE_CANCELED
VALID_STATUS_CODE_TRANSITIONS = {
STATUS_CODE_NEW: [STATUS_CODE_QUEUED],
STATUS_CODE_QUEUED: [STATUS_CODE_STARTED, STATUS_CODE_CANCELED],
STATUS_CODE_STARTED: [
STATUS_CODE_COMPLETED, STATUS_CODE_FAILED, STATUS_CODE_CANCELED],
STATUS_CODE_COMPLETED: [],
STATUS_CODE_FAILED: [],
STATUS_CODE_CANCELED: [],
}
# The default amount of time that defines a 'recent' job. Jobs that were
# queued more recently than this number of milliseconds ago are considered
# 'recent'.
DEFAULT_RECENCY_MSEC = 14 * 24 * 60 * 60 * 1000
# The maximum number of previously-run jobs to show in the admin dashboard.
NUM_JOBS_IN_DASHBOARD_LIMIT = 100
class BaseJobManager(python_utils.OBJECT):
"""Base class for managing long-running jobs.
These jobs are not transaction-safe, and multiple jobs of the same kind
may run at once and overlap. Individual jobs should account for this. In
particular, if a job writes to some location, no other enqueued or running
job should be writing to, or reading from, that location.
This is expected to be the case for one-off migration jobs, as well as
batch reporting jobs. One-off migration jobs are expected to be transient
and will not be a permanent part of the codebase. Batch reporting jobs are
expected to write to a particular datastore model that is optimized for
fast querying; each batch reporting job should correspond to exactly one of
these models. The reporting jobs are expected to be run as MapReduces; to
find existing ones, search for subclasses of BaseMapReduceJobManager.
Note that the enqueue(), register_start(), register_completion(),
register_failure() and cancel() methods in this class batch the following
operations:
(a) Running pre- and post-hooks
(b) Updating the status of the job in the datastore
(c) Performing the operation.
Each entire batch is not run in a transaction, but subclasses can still
perform (a) or (c) transactionally if they wish to.
"""
@classmethod
def _is_abstract(cls):
"""Checks if the job is created using the abstract base manager class.
Returns:
            bool. Whether the job is created using the abstract base
            manager class.
"""
return cls in ABSTRACT_BASE_CLASSES
@classmethod
def create_new(cls):
"""Creates a new job of this class type.
Returns:
str. The unique id of this job.
Raises:
Exception: This method (instead of a subclass method) was directly
used to create a new job.
"""
if cls._is_abstract():
raise Exception(
'Tried to directly create a job using the abstract base '
'manager class %s, which is not allowed.' % cls.__name__)
def _create_new_job():
"""Creates a new job by generating a unique id and inserting
it into the model.
Returns:
str. The unique job id.
"""
job_id = job_models.JobModel.get_new_id(cls.__name__)
job_models.JobModel(id=job_id, job_type=cls.__name__).put()
return job_id
return transaction_services.run_in_transaction(_create_new_job)
@classmethod
def enqueue(cls, job_id, queue_name, additional_job_params=None):
"""Marks a job as queued and adds it to a queue for processing.
Args:
job_id: str. The ID of the job to enqueue.
queue_name: str. The queue name the job should be run in. See
core.platform.taskqueue.gae_taskqueue_services for supported
values.
additional_job_params: dict(str : *) or None. Additional parameters
for the job.
"""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_QUEUED)
cls._require_correct_job_type(model.job_type)
# Enqueue the job.
cls._pre_enqueue_hook(job_id)
cls._real_enqueue(job_id, queue_name, additional_job_params)
model.status_code = STATUS_CODE_QUEUED
model.time_queued_msec = utils.get_current_time_in_millisecs()
model.additional_job_params = additional_job_params
model.put()
cls._post_enqueue_hook(job_id)
@classmethod
def register_start(cls, job_id, metadata=None):
"""Marks a job as started.
Args:
job_id: str. The ID of the job to start.
metadata: dict(str : *) or None. Additional metadata of the job.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_STARTED)
cls._require_correct_job_type(model.job_type)
cls._pre_start_hook(job_id)
model.metadata = metadata
model.status_code = STATUS_CODE_STARTED
model.time_started_msec = utils.get_current_time_in_millisecs()
model.put()
cls._post_start_hook(job_id)
@classmethod
def register_completion(cls, job_id, output_list):
"""Marks a job as completed.
Args:
job_id: str. The ID of the job to complete.
output_list: list(object). The output produced by the job.
"""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_COMPLETED)
cls._require_correct_job_type(model.job_type)
model.status_code = STATUS_CODE_COMPLETED
model.time_finished_msec = utils.get_current_time_in_millisecs()
model.output = cls._compress_output_list(output_list)
model.put()
cls._post_completed_hook(job_id)
@classmethod
def _compress_output_list(
cls, output_list, test_only_max_output_len_chars=None):
"""Returns compressed list of strings within a max length of chars.
Ensures that the payload (i.e.,
[python_utils.UNICODE(output) for output in output_list])
makes up at most max_output_chars of the final output data.
Args:
output_list: list(*). Collection of objects to be stringified.
test_only_max_output_len_chars: int or None. Overrides the intended
max output len limit when not None.
Returns:
list(str). The compressed stringified output values.
"""
_max_output_len_chars = 900000
class _OrderedCounter(collections.Counter, collections.OrderedDict):
"""Counter that remembers the order elements are first encountered.
We use this class so that our tests can rely on deterministic
ordering, instead of simply using `collections.Counter` which has
non-deterministic ordering.
"""
pass
# Consolidate the lines of output since repeating them isn't useful.
counter = _OrderedCounter(
python_utils.UNICODE(output) for output in output_list)
output_str_list = [
output_str if count == 1 else '(%dx) %s' % (count, output_str)
for (output_str, count) in counter.items()
]
# Truncate outputs to fit within given max length.
remaining_len = (
_max_output_len_chars if test_only_max_output_len_chars is None else
test_only_max_output_len_chars)
for idx, output_str in enumerate(output_str_list):
remaining_len -= len(output_str)
if remaining_len < 0:
# Truncate all remaining output to fit in the limit.
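                # (Editorial note) remaining_len is negative here, so the
                # slice output_str[:remaining_len] drops exactly the
                # overflow characters from the end of the current string.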
kept_str = output_str[:remaining_len]
output_str_list[idx:] = [
('%s <TRUNCATED>' % kept_str) if kept_str else '<TRUNCATED>'
]
break
return output_str_list
@classmethod
def register_failure(cls, job_id, error):
"""Marks a job as failed.
Args:
job_id: str. The ID of the job to fail.
error: str. The error raised by the job.
"""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_FAILED)
cls._require_correct_job_type(model.job_type)
model.status_code = STATUS_CODE_FAILED
model.time_finished_msec = utils.get_current_time_in_millisecs()
model.error = error
model.put()
cls._post_failure_hook(job_id)
@classmethod
def cancel(cls, job_id, user_id):
"""Marks a job as canceled.
Args:
job_id: str. The ID of the job to cancel.
user_id: str. The id of the user who cancelled the job.
"""
# Ensure that preconditions are met.
model = job_models.JobModel.get(job_id, strict=True)
cls._require_valid_transition(
job_id, model.status_code, STATUS_CODE_CANCELED)
cls._require_correct_job_type(model.job_type)
cancel_message = 'Canceled by %s' % (user_id or 'system')
# Cancel the job.
cls._pre_cancel_hook(job_id, cancel_message)
model.status_code = STATUS_CODE_CANCELED
model.time_finished_msec = utils.get_current_time_in_millisecs()
model.error = cancel_message
model.put()
cls._post_cancel_hook(job_id, cancel_message)
@classmethod
def is_active(cls, job_id):
"""Returns whether the job is still active.
Args:
job_id: str. The ID of the job to query.
Returns:
bool. Whether the job is active or not.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.status_code in [STATUS_CODE_QUEUED, STATUS_CODE_STARTED]
@classmethod
def has_finished(cls, job_id):
"""Returns whether the job has finished.
Args:
job_id: str. The ID of the job to query.
Returns:
bool. Whether the job has finished or not.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.status_code in [STATUS_CODE_COMPLETED, STATUS_CODE_FAILED]
@classmethod
def cancel_all_unfinished_jobs(cls, user_id):
"""Cancels all queued or started jobs of this job type.
Args:
user_id: str. The id of the user who is cancelling the jobs.
"""
unfinished_job_models = job_models.JobModel.get_unfinished_jobs(
cls.__name__)
for model in unfinished_job_models:
cls.cancel(model.id, user_id)
@classmethod
def _real_enqueue(cls, job_id, queue_name, additional_job_params):
"""Does the actual work of enqueueing a job for deferred execution.
Args:
job_id: str. The ID of the job to enqueue.
queue_name: str. The queue name the job should be run in. See
core.platform.taskqueue.gae_taskqueue_services for supported
values.
additional_job_params: dict(str : *) or None. Additional parameters
on jobs.
"""
raise NotImplementedError(
'Subclasses of BaseJobManager should implement _real_enqueue().')
@classmethod
def get_status_code(cls, job_id):
"""Returns the status code of the job.
Args:
job_id: str. The ID of the job to query.
Returns:
str. Status code of the job.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.status_code
@classmethod
def get_time_queued_msec(cls, job_id):
"""Returns the time the job got queued.
Args:
job_id: str. The ID of the job to query.
Returns:
float. The time the job got queued in milliseconds after the Epoch.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.time_queued_msec
@classmethod
def get_time_started_msec(cls, job_id):
"""Returns the time the job got started.
Args:
job_id: str. The ID of the job to query.
Returns:
float. The time the job got started in milliseconds after the Epoch.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.time_started_msec
@classmethod
def get_time_finished_msec(cls, job_id):
"""Returns the time the job got finished.
Args:
job_id: str. The ID of the job to query.
Returns:
float. The time the job got finished in milliseconds after the
Epoch.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.time_finished_msec
@classmethod
def get_metadata(cls, job_id):
"""Returns the metadata of the job.
Args:
job_id: str. The ID of the job to query.
Returns:
dict(str : *). The metadata of the job.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.metadata
@classmethod
def get_output(cls, job_id):
"""Returns the output of the job.
Args:
job_id: str. The ID of the job to query.
Returns:
*. The output of the job.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.output
@classmethod
def get_error(cls, job_id):
"""Returns the error encountered by the job.
Args:
job_id: str. The ID of the job to query.
Returns:
str. Describes the error encountered by the job.
"""
model = job_models.JobModel.get(job_id, strict=True)
cls._require_correct_job_type(model.job_type)
return model.error
@classmethod
def _require_valid_transition(
cls, job_id, old_status_code, new_status_code):
"""Asserts that the transition of the job status code is valid.
Args:
job_id: str. The ID of the job to query.
old_status_code: str. Old status code.
new_status_code: str. New status code.
Raises:
Exception: The given status code change is invalid.
"""
valid_new_status_codes = VALID_STATUS_CODE_TRANSITIONS[old_status_code]
if new_status_code not in valid_new_status_codes:
raise Exception(
'Invalid status code change for job %s: from %s to %s' %
(job_id, old_status_code, new_status_code))
@classmethod
def _require_correct_job_type(cls, job_type):
"""Returns validity of given job type.
Args:
job_type: str. Name of a job class.
Raises:
Exception: The given job type is incorrect.
"""
if job_type != cls.__name__:
raise Exception(
'Invalid job type %s for class %s' % (job_type, cls.__name__))
@classmethod
def _pre_enqueue_hook(cls, job_id):
"""A hook or a callback function triggered before enqueuing a job.
Args:
job_id: str. The unique ID of the job which is to be enqueued.
"""
pass
@classmethod
def _post_enqueue_hook(cls, job_id):
"""A hook or a callback function triggered after enqueuing a job.
Args:
job_id: str. The unique ID of the job which was enqueued.
"""
pass
@classmethod
def _pre_start_hook(cls, job_id):
"""A hook or a callback function triggered before marking a job
as started.
Args:
job_id: str. The unique ID of the job to be marked as started.
"""
pass
@classmethod
def _post_start_hook(cls, job_id):
"""A hook or a callback function triggered after marking a job as
started.
Args:
job_id: str. The unique ID of the job marked as started.
"""
pass
@classmethod
def _post_completed_hook(cls, job_id):
"""A hook or a callback function triggered after marking a job as
completed.
Args:
job_id: str. The unique ID of the job marked as completed.
"""
pass
@classmethod
def _post_failure_hook(cls, job_id):
"""A hook or a callback function triggered after marking a job as
failed.
Args:
job_id: str. The unique ID of the job marked as failed.
"""
pass
@classmethod
def _pre_cancel_hook(cls, job_id, cancel_message):
"""A hook or a callback function triggered before marking a job as
cancelled.
Args:
job_id: str. The unique ID of the job to be marked as cancelled.
cancel_message: str. The message to be displayed before
cancellation.
"""
pass
@classmethod
def _post_cancel_hook(cls, job_id, cancel_message):
"""A hook or a callback function triggered after marking a job as
cancelled.
Args:
job_id: str. The unique ID of the job marked as cancelled.
cancel_message: str. The message to be displayed after cancellation.
"""
pass
class BaseDeferredJobManager(BaseJobManager):
"""Base class to run a job/method as deferred task. These tasks will be
pushed to the default taskqueue.
"""
@classmethod
def _run(cls, additional_job_params):
"""Function that performs the main business logic of the job.
Args:
additional_job_params: dict(str : *). Additional parameters on jobs.
"""
raise NotImplementedError
@classmethod
def _run_job(cls, job_id, additional_job_params):
"""Starts the job.
Args:
job_id: str. The ID of the job to run.
additional_job_params: dict(str : *). Additional parameters on job.
Raises:
PermanentTaskFailure: No further work can be scheduled.
"""
logging.info(
'Job %s started at %s' %
(job_id, utils.get_current_time_in_millisecs()))
cls.register_start(job_id)
try:
result = cls._run(additional_job_params)
except Exception as e:
logging.error(traceback.format_exc())
logging.error(
'Job %s failed at %s' %
(job_id, utils.get_current_time_in_millisecs()))
cls.register_failure(
job_id, '%s\n%s'
% (python_utils.UNICODE(e), traceback.format_exc()))
raise taskqueue_services.PermanentTaskFailure(
'Task failed: %s\n%s'
% (python_utils.UNICODE(e), traceback.format_exc()))
# Note that the job may have been canceled after it started and before
# it reached this stage. This will result in an exception when the
# validity of the status code transition is checked.
cls.register_completion(job_id, [result])
logging.info(
'Job %s completed at %s' %
(job_id, utils.get_current_time_in_millisecs()))
@classmethod
def _real_enqueue(cls, job_id, queue_name, additional_job_params):
"""Puts the job in the task queue.
Args:
job_id: str. The ID of the job to enqueue.
queue_name: str. The queue name the job should be run in. See
core.platform.taskqueue.gae_taskqueue_services for supported
values.
additional_job_params: dict(str : *) or None. Additional params to
pass into the job's _run() method.
"""
taskqueue_services.defer(
cls._run_job, queue_name, job_id, additional_job_params)
class MapReduceJobPipeline(base_handler.PipelineBase):
"""This class inherits from the PipelineBase class which are used to
connect various workflows/functional procedures together. It implements
a run method which is called when this job is started by using start()
method on the object created from this class.
"""
def run(self, job_id, job_class_str, kwargs):
"""Returns a coroutine which runs the job pipeline and stores results.
Args:
job_id: str. The ID of the job to run.
job_class_str: str. Should uniquely identify each type of job.
kwargs: dict(str : object). Extra arguments used to build the
MapreducePipeline.
Yields:
MapreducePipeline. Ready to start processing. Expects the output of
that pipeline to be sent back.
StoreMapReduceResults. Will be constructed with whatever output the
caller sends back to the coroutine.
"""
job_class = mapreduce_util.for_name(job_class_str)
job_class.register_start(job_id, metadata={
job_class._OUTPUT_KEY_ROOT_PIPELINE_ID: self.root_pipeline_id # pylint: disable=protected-access
})
# TODO(sll): Need try/except/mark-as-canceled here?
output = yield mapreduce_pipeline.MapreducePipeline(**kwargs)
yield StoreMapReduceResults(job_id, job_class_str, output)
def finalized(self):
"""Suppresses the default pipeline behavior of sending email."""
# TODO(sll): Should mark-as-done be here instead?
pass
class StoreMapReduceResults(base_handler.PipelineBase):
"""MapreducePipeline class to store output results."""
def run(self, job_id, job_class_str, output):
"""Extracts the results of a MR job and registers its completion.
Args:
job_id: str. The ID of the job to run.
job_class_str: str. Should uniquely identify each type of job.
output: str. The output produced by the job.
"""
job_class = mapreduce_util.for_name(job_class_str)
try:
iterator = input_readers.GoogleCloudStorageInputReader(
output, 0)
results_list = []
for item_reader in iterator:
for item in item_reader:
results_list.append(json.loads(item))
job_class.register_completion(job_id, results_list)
except Exception as e:
logging.error(traceback.format_exc())
logging.error(
'Job %s failed at %s' %
(job_id, utils.get_current_time_in_millisecs()))
job_class.register_failure(
job_id,
'%s\n%s' % (python_utils.UNICODE(e), traceback.format_exc()))
class GoogleCloudStorageConsistentJsonOutputWriter(
output_writers.GoogleCloudStorageConsistentOutputWriter):
"""This is an Output Writer which is used to consistently store MapReduce
job's results in json format. GoogleCloudStorageConsistentOutputWriter is
preferred as it's consistent. For more details please look here
https://github.com/GoogleCloudPlatform/appengine-mapreduce/wiki/3.4-Readers-and-Writers#googlecloudstorageoutputwriter
"""
def write(self, data):
"""Writes that data serialized in JSON format.
Args:
data: *. Data to be serialized in JSON format.
"""
super(GoogleCloudStorageConsistentJsonOutputWriter, self).write(
python_utils.convert_to_bytes('%s\n' % json.dumps(data)))
class BaseMapReduceJobManager(BaseJobManager):
"""The output for this job is a list of individual results. Each item in the
list will be of whatever type is yielded from the 'reduce' method.
The 'metadata' field in the BaseJob representing a MapReduceJob is a dict
with one key, _OUTPUT_KEY_ROOT_PIPELINE_ID. The corresponding value is a
string representing the ID of the MapReduceJobPipeline as known to the
mapreduce/lib/pipeline internals. This is used to generate URLs pointing at
the pipeline support UI.
"""
_OUTPUT_KEY_ROOT_PIPELINE_ID = 'root_pipeline_id'
@staticmethod
def get_mapper_param(param_name):
"""Returns current value of given param_name for this job.
Args:
param_name: str. One of the configurable parameters of this
particular mapreduce job.
Returns:
*. The current value of the parameter.
Raises:
            Exception: The parameter is not associated with this job type.
"""
return context.get().mapreduce_spec.mapper.params[param_name]
@classmethod
def entity_classes_to_map_over(cls):
"""Return a list of datastore class references to map over."""
raise NotImplementedError(
'Classes derived from BaseMapReduceJobManager must implement '
'entity_classes_to_map_over()')
@staticmethod
def map(item):
"""Implements the map function. Must be declared @staticmethod.
This function may yield as many times as appropriate (including zero)
to return key/value 2-tuples. For example, to get a count of all
explorations, one might yield (exploration.id, 1).
WARNING: The OutputWriter converts mapper output keys to type str. So,
if you have keys that are of type unicode, you must yield
"key.encode('utf-8')", rather than "key".
Args:
item: *. A single element of the type given by entity_class().
"""
raise NotImplementedError(
'Classes derived from BaseMapReduceJobManager must implement map '
'as a @staticmethod.')
@staticmethod
def reduce(key, values):
"""Implements the reduce function. Must be declared @staticmethod.
This function should yield a JSON string. All emitted outputs from all
reducers will be collected in an array and set into the output value
for the job, so don't pick anything huge. If you need something huge,
persist it out into the datastore instead and return a reference (and
dereference it later to load content as needed).
This code can assume that it is the only process handling values for the
given key.
TODO(brianrodri): Verify whether it can also assume that it will be
called exactly once for each key with all of the output.
Args:
key: *. A key value as emitted from the map() function, above.
values: list(*). A list of all values from all mappers that were
tagged with the given key.
"""
raise NotImplementedError(
'Classes derived from BaseMapReduceJobManager must implement '
'reduce as a @staticmethod.')
@classmethod
def _real_enqueue(cls, job_id, queue_name, additional_job_params):
"""Configures, creates, and queues the pipeline for the given job and
params.
Args:
job_id: str. The ID of the job to enqueue.
queue_name: str. The queue name the job should be run in. See
core.platform.taskqueue.gae_taskqueue_services for supported
values.
additional_job_params: dict(str : *) or None. Additional params to
pass into the job's _run() method.
Raises:
Exception: Passed a value to a parameter in the mapper which has
already been given a value.
"""
entity_class_types = cls.entity_classes_to_map_over()
entity_class_names = [
'%s.%s' % (entity_class_type.__module__, entity_class_type.__name__)
for entity_class_type in entity_class_types]
kwargs = {
'job_name': job_id,
'mapper_spec': '%s.%s.map' % (cls.__module__, cls.__name__),
'reducer_spec': '%s.%s.reduce' % (cls.__module__, cls.__name__),
'input_reader_spec': (
'core.jobs.MultipleDatastoreEntitiesInputReader'),
'output_writer_spec': (
'core.jobs.GoogleCloudStorageConsistentJsonOutputWriter'),
'mapper_params': {
MAPPER_PARAM_KEY_ENTITY_KINDS: entity_class_names,
# Note that all parameters passed to the mapper need to be
# strings. Also note that the value for this key is determined
# just before enqueue time, so it will be roughly equal to the
# actual enqueue time.
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS: python_utils.UNICODE(
utils.get_current_time_in_millisecs()),
},
'reducer_params': {
'output_writer': {
'bucket_name': app_identity.get_default_gcs_bucket_name(),
'content_type': 'text/plain',
'naming_format': 'mrdata/$name/$id/output-$num',
}
}
}
if additional_job_params is not None:
for param_name in additional_job_params:
if param_name in kwargs['mapper_params']:
raise Exception(
'Additional job param %s shadows an existing mapper '
'param' % param_name)
kwargs['mapper_params'][param_name] = copy.deepcopy(
additional_job_params[param_name])
mr_pipeline = MapReduceJobPipeline(
job_id, '%s.%s' % (cls.__module__, cls.__name__), kwargs)
mr_pipeline.start(
base_path='/mapreduce/worker/pipeline', queue_name=queue_name)
@classmethod
def _pre_cancel_hook(cls, job_id, cancel_message):
"""A hook or a callback function triggered before marking a job as
cancelled.
Args:
job_id: str. The unique ID of the job to be marked as cancelled.
cancel_message: str. The message to be displayed before
cancellation.
"""
metadata = cls.get_metadata(job_id)
root_pipeline_id = metadata[cls._OUTPUT_KEY_ROOT_PIPELINE_ID]
pipeline.Pipeline.from_id(root_pipeline_id).abort(cancel_message)
@staticmethod
def entity_created_before_job_queued(entity):
"""Checks that the given entity was created before the MR job was
queued.
Mapper methods may want to use this as a precomputation check,
especially if the datastore classes being iterated over are append-only
event logs.
Args:
entity: BaseModel. An entity this job type is responsible for
handling.
Returns:
            bool. Whether the entity was created before the job was queued.
"""
created_on_msec = utils.get_time_in_millisecs(entity.created_on)
job_queued_msec = float(context.get().mapreduce_spec.mapper.params[
MAPPER_PARAM_KEY_QUEUED_TIME_MSECS])
return job_queued_msec >= created_on_msec
class BaseMapReduceOneOffJobManager(BaseMapReduceJobManager):
"""Overriden to force subclass jobs into the one-off jobs queue."""
@classmethod
def enqueue(cls, job_id, additional_job_params=None):
"""Marks a job as queued and adds it to a queue for processing.
Args:
job_id: str. The ID of the job to enqueue.
additional_job_params: dict(str : *) or None. Additional parameters
for the job.
"""
super(BaseMapReduceOneOffJobManager, cls).enqueue(
job_id, taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS,
additional_job_params=additional_job_params)
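    # Illustrative usage (editorial addition, not part of the original
    # file): a one-off job is started with code equivalent to the following,
    # where SomeOneOffJob is a hypothetical subclass of this class.
    #
    #     job_id = SomeOneOffJob.create_new()
    #     SomeOneOffJob.enqueue(job_id)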
class MultipleDatastoreEntitiesInputReader(input_readers.InputReader):
"""This Input Reader is used to read values from multiple
classes in the datastore and pass them to mapper functions in MapReduce
jobs.
"""
_ENTITY_KINDS_PARAM = MAPPER_PARAM_KEY_ENTITY_KINDS
_READER_LIST_PARAM = 'readers'
def __init__(self, reader_list):
self._reader_list = reader_list
def __iter__(self):
for reader in self._reader_list:
yield reader
@classmethod
def from_json(cls, input_shard_state):
"""Creates an instance of the InputReader for the given input shard
state.
Args:
input_shard_state: dict(str : *). The InputReader state as a
dict-like object.
Returns:
*. An instance of the InputReader configured using the input shard
state.
"""
return cls(input_readers.DatastoreInputReader.from_json(
input_shard_state[cls._READER_LIST_PARAM]))
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
dict(str : *). A json-izable version of the remaining InputReader.
"""
return {
self._READER_LIST_PARAM: self._reader_list.to_json()
}
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers.
This method creates a list of input readers, each for one shard. It
attempts to split inputs among readers evenly.
Args:
mapper_spec: model.MapperSpec. Specifies the inputs and additional
parameters to define the behavior of input readers.
Returns:
list(InputReaders). None or [] when no input data can be found.
"""
params = mapper_spec.params
entity_kinds = params.get(cls._ENTITY_KINDS_PARAM)
readers_list = []
for entity_kind in entity_kinds:
new_mapper_spec = copy.deepcopy(mapper_spec)
new_mapper_spec.params['entity_kind'] = entity_kind
readers_list.append(
input_readers.DatastoreInputReader.split_input(
new_mapper_spec))
inputs = []
for reader_list in readers_list:
for reader in reader_list:
inputs.append(MultipleDatastoreEntitiesInputReader(reader))
return inputs