Skip to content

Commit

Permalink
Clean up attempt # 2 (openedx#1575)
Browse files Browse the repository at this point in the history
* Python code cleanup by the cleanup-python-code Jenkins job.

This pull request was generated by the cleanup-python-code Jenkins job, which ran
```
modernize_travis;modernize_tox;make upgrade;find . -type f -name '*.py' | while read fname; do pyupgrade --exit-zero-even-if-changed --keep-percent-format --py3-plus --py36-plus --py38-plus "$fname"; done
```

The following packages were installed:
`pyupgrade,git+https://github.com/edx/repo-tools.git@5ed136448729ac3bd962bdb5d8fda417dc63efaa`

* Quality Fixes

* version bump and allow failure on django 3

Co-authored-by: edX requirements bot <[email protected]>
  • Loading branch information
M. Zulqarnain and edx-requirements-bot authored Jan 22, 2021
1 parent b24e8ad commit 0d46321
Show file tree
Hide file tree
Showing 115 changed files with 1,355 additions and 1,436 deletions.
21 changes: 13 additions & 8 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,16 +8,21 @@ language: python
sudo: false

python:
- 3.5
- 3.8

- '3.8'
env:
- TOXENV=django22
- TOXENV=js
- TOXENV=quality

- TOXENV=django22
- TOXENV=django30
- TOXENV=django31
matrix:
allow_failures:
- python: 3.8
env: TOXENV=django30
- python: 3.8
env: TOXENV=django31
before_install:
- "pip install -U pip"
- pip install -U pip
- export AWS_CONFIG_FILE=/dev/null

cache:
Expand All @@ -38,7 +43,7 @@ deploy:
distributions: sdist bdist_wheel
on:
tags: true
condition: "$TOXENV = quality"
python: 3.5
condition: $TOXENV = quality
python: 3.8
password:
secure: F7yrAFt9c56Y/x29pNbI3LMEATc6DPDTqEXs5WDDRwse/JwKe3MSsXRv6ois6JKzWroHQOZu4CKBbtfZ8v4fWv8lT4kwMJzAq8I4tda4qaSWulHiTdefzkR147oW9db2lTAKFOZsV/XUFFsv2sHDK/SQiJ0y+nxTgoMxEILChnw=
10 changes: 5 additions & 5 deletions openassessment/assessment/admin.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,16 +33,16 @@ class RubricAdmin(admin.ModelAdmin):
def criteria_summary(self, rubric_obj):
    """Short description of criteria for presenting in a list.

    Renders each criterion of the rubric as ``name - label: points_possible``,
    comma-separated, for use as an admin list-display column.

    Args:
        rubric_obj: Rubric model instance to summarize.

    Returns:
        str: single-line summary of every criterion in the cached
        serialized rubric data.
    """
    # The serializer cache avoids re-serializing the rubric on every
    # admin list-page render.
    rubric_data = RubricSerializer.serialized_from_cache(rubric_obj)
    return ", ".join(
        "{} - {}: {}".format(
            criterion["name"], criterion["label"], criterion["points_possible"]
        )
        for criterion in rubric_data["criteria"]
    )

def data(self, rubric_obj):
    """Full JSON string of rubric, indented and HTML formatted.

    Args:
        rubric_obj: Rubric model instance to render.

    Returns:
        An HTML-safe string (via ``format_html``) containing the
        pretty-printed JSON serialization of the rubric wrapped in a
        ``<pre>`` element, suitable for display in the admin.
    """
    rubric_data = RubricSerializer.serialized_from_cache(rubric_obj)
    # format_html escapes the JSON payload, so rubric content cannot
    # inject markup into the admin page.
    return format_html(
        "<pre>\n{}\n</pre>",
        json.dumps(rubric_data, sort_keys=True, indent=4),
    )


class PeerWorkflowItemInline(admin.StackedInline):
Expand Down Expand Up @@ -97,15 +97,15 @@ def rubric_link(self, assessment_obj):
args=[assessment_obj.rubric.id]
)
return format_html(
u'<a href="{}">{}</a>', url, assessment_obj.rubric.content_hash)
'<a href="{}">{}</a>', url, assessment_obj.rubric.content_hash)
rubric_link.admin_order_field = 'rubric__content_hash'
rubric_link.short_description = 'Rubric'

def parts_summary(self, assessment_obj):
"""
Returns the parts summary of this assessment as HTML.
"""
return format_html_join("<br/>", u"{}/{} - {} - {}: {} - {} - {}", ((
return format_html_join("<br/>", "{}/{} - {} - {}: {} - {} - {}", ((
part.points_earned,
part.points_possible,
part.criterion.name,
Expand Down
120 changes: 60 additions & 60 deletions openassessment/assessment/api/peer.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,8 +103,8 @@ def submitter_is_finished(submission_uuid, peer_requirements):
return False
except PeerWorkflow.DoesNotExist:
return False
except KeyError:
raise PeerAssessmentRequestError(u'Requirements dict must contain "must_grade" key')
except KeyError as ex:
raise PeerAssessmentRequestError('Requirements dict must contain "must_grade" key') from ex


def assessment_is_finished(submission_uuid, peer_requirements):
Expand Down Expand Up @@ -172,14 +172,14 @@ def on_start(submission_uuid):
# If we get an integrity error, it means someone else has already
# created a workflow for this submission, so we don't need to do anything.
pass
except DatabaseError:
except DatabaseError as ex:
error_message = (
u"An internal error occurred while creating a new peer "
u"workflow for submission {}"
"An internal error occurred while creating a new peer "
"workflow for submission {}"
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
raise PeerAssessmentInternalError(error_message) from ex


def get_score(submission_uuid, peer_requirements):
Expand Down Expand Up @@ -306,8 +306,8 @@ def create_assessment(
peer_workflow_item = scorer_workflow.find_active_assessments()
if peer_workflow_item is None:
message = (
u"There are no open assessments associated with the scorer's "
u"submission UUID {}."
"There are no open assessments associated with the scorer's "
"submission UUID {}."
).format(scorer_submission_uuid)
logger.warning(message)
raise PeerAssessmentWorkflowError(message)
Expand All @@ -327,27 +327,27 @@ def create_assessment(

_log_assessment(assessment, scorer_workflow)
return full_assessment_dict(assessment)
except PeerWorkflow.DoesNotExist:
except PeerWorkflow.DoesNotExist as ex:
message = (
u"There is no Peer Workflow associated with the given "
u"submission UUID {}."
"There is no Peer Workflow associated with the given "
"submission UUID {}."
).format(scorer_submission_uuid)
logger.exception(message)
raise PeerAssessmentWorkflowError(message)
except InvalidRubric:
msg = u"The rubric definition is not valid."
raise PeerAssessmentWorkflowError(message) from ex
except InvalidRubric as ex:
msg = "The rubric definition is not valid."
logger.exception(msg)
raise PeerAssessmentRequestError(msg)
except InvalidRubricSelection:
msg = u"Invalid options were selected in the rubric."
raise PeerAssessmentRequestError(msg) from ex
except InvalidRubricSelection as ex:
msg = "Invalid options were selected in the rubric."
logger.warning(msg, exc_info=True)
raise PeerAssessmentRequestError(msg)
except DatabaseError:
raise PeerAssessmentRequestError(msg) from ex
except DatabaseError as ex:
error_message = (
u"An error occurred while creating an assessment by the scorer with this ID: {}"
"An error occurred while creating an assessment by the scorer with this ID: {}"
).format(scorer_id)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
raise PeerAssessmentInternalError(error_message) from ex


@transaction.atomic
Expand Down Expand Up @@ -446,12 +446,12 @@ def get_rubric_max_scores(submission_uuid):
criterion["name"]: criterion["points_possible"]
for criterion in rubric_dict["criteria"]
}
except DatabaseError:
except DatabaseError as ex:
error_message = (
u"Error getting rubric options max scores for submission uuid {uuid}"
"Error getting rubric options max scores for submission uuid {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
raise PeerAssessmentInternalError(error_message) from ex


def get_assessment_median_scores(submission_uuid):
Expand Down Expand Up @@ -486,12 +486,12 @@ def get_assessment_median_scores(submission_uuid):
return Assessment.get_median_score_dict(scores)
except PeerWorkflow.DoesNotExist:
return {}
except DatabaseError:
except DatabaseError as ex:
error_message = (
u"Error getting assessment median scores for submission {uuid}"
"Error getting assessment median scores for submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
raise PeerAssessmentInternalError(error_message) from ex


def has_finished_required_evaluating(submission_uuid, required_assessments):
Expand Down Expand Up @@ -586,12 +586,12 @@ def get_assessments(submission_uuid, limit=None):
score_type=PEER_TYPE
)[:limit]
return serialize_assessments(assessments)
except DatabaseError:
except DatabaseError as ex:
error_message = (
u"Error getting assessments for submission {uuid}"
"Error getting assessments for submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
raise PeerAssessmentInternalError(error_message) from ex


def get_submitted_assessments(submission_uuid, limit=None):
Expand Down Expand Up @@ -652,12 +652,12 @@ def get_submitted_assessments(submission_uuid, limit=None):
assessments = Assessment.objects.filter(
pk__in=[item.assessment.pk for item in items])[:limit]
return serialize_assessments(assessments)
except DatabaseError:
except DatabaseError as ex:
error_message = (
u"Couldn't retrieve the assessments completed by the student with submission {uuid}"
"Couldn't retrieve the assessments completed by the student with submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
raise PeerAssessmentInternalError(error_message) from ex


def get_submission_to_assess(submission_uuid, graded_by):
Expand Down Expand Up @@ -708,8 +708,8 @@ def get_submission_to_assess(submission_uuid, graded_by):

if not workflow:
raise PeerAssessmentWorkflowError(
u"A Peer Assessment Workflow does not exist for the student "
u"with submission UUID {}".format(submission_uuid)
"A Peer Assessment Workflow does not exist for the student "
"with submission UUID {}".format(submission_uuid)
)

if workflow.is_cancelled:
Expand All @@ -730,16 +730,16 @@ def get_submission_to_assess(submission_uuid, graded_by):
PeerWorkflow.create_item(workflow, peer_submission_uuid)
_log_workflow(peer_submission_uuid, workflow)
return submission_data
except sub_api.SubmissionNotFoundError:
except sub_api.SubmissionNotFoundError as ex:
error_message = (
u"Could not find a submission with the uuid {} for student {} "
u"in the peer workflow."
"Could not find a submission with the uuid {} for student {} "
"in the peer workflow."
).format(peer_submission_uuid, workflow.student_id)
logger.exception(error_message)
raise PeerAssessmentWorkflowError(error_message)
raise PeerAssessmentWorkflowError(error_message) from ex
else:
logger.info(
u"No submission found for {} to assess ({}, {})"
"No submission found for {} to assess ({}, {})"
.format(
workflow.student_id,
workflow.course_id,
Expand Down Expand Up @@ -784,13 +784,13 @@ def create_peer_workflow(submission_uuid):
# If we get an integrity error, it means someone else has already
# created a workflow for this submission, so we don't need to do anything.
pass
except DatabaseError:
except DatabaseError as ex:
error_message = (
u"An internal error occurred while creating a new peer "
u"workflow for submission {}"
"An internal error occurred while creating a new peer "
"workflow for submission {}"
).format(submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
raise PeerAssessmentInternalError(error_message) from ex


def create_peer_workflow_item(scorer_submission_uuid, submission_uuid):
Expand Down Expand Up @@ -834,13 +834,13 @@ def get_assessment_feedback(submission_uuid):
return AssessmentFeedbackSerializer(feedback).data
except AssessmentFeedback.DoesNotExist:
return None
except DatabaseError:
except DatabaseError as ex:
error_message = (
u"An error occurred retrieving assessment feedback for {}."
"An error occurred retrieving assessment feedback for {}."
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
raise PeerAssessmentInternalError(error_message) from ex


def set_assessment_feedback(feedback_dict):
Expand All @@ -865,7 +865,7 @@ def set_assessment_feedback(feedback_dict):
selected_options = feedback_dict.get('options', list())

if feedback_text and len(feedback_text) > AssessmentFeedback.MAXSIZE:
error_message = u"Assessment feedback too large."
error_message = "Assessment feedback too large."
raise PeerAssessmentRequestError(error_message)

try:
Expand All @@ -875,7 +875,7 @@ def set_assessment_feedback(feedback_dict):
if submission_uuid:
feedback, created = AssessmentFeedback.objects.get_or_create(submission_uuid=submission_uuid)
else:
error_message = u"An error occurred creating assessment feedback: bad or missing submission_uuid."
error_message = "An error occurred creating assessment feedback: bad or missing submission_uuid."
logger.error(error_message)
raise PeerAssessmentRequestError(error_message)

Expand All @@ -893,10 +893,10 @@ def set_assessment_feedback(feedback_dict):
# Associate the feedback with scored assessments
assessments = PeerWorkflowItem.get_scored_assessments(submission_uuid)
feedback.assessments.add(*assessments)
except DatabaseError:
msg = u"Error occurred while creating or updating feedback on assessment: {}".format(feedback_dict)
except DatabaseError as ex:
msg = f"Error occurred while creating or updating feedback on assessment: {feedback_dict}"
logger.exception(msg)
raise PeerAssessmentInternalError(msg)
raise PeerAssessmentInternalError(msg) from ex


def _log_assessment(assessment, scorer_workflow):
Expand All @@ -913,9 +913,9 @@ def _log_assessment(assessment, scorer_workflow):
"""
logger.info(
u"Created peer-assessment {assessment_id} for submission "
u"{submission_uuid}, course {course_id}, item {item_id} "
u"with rubric {rubric_content_hash}; scored by {scorer}"
"Created peer-assessment {assessment_id} for submission "
"{submission_uuid}, course {course_id}, item {item_id} "
"with rubric {rubric_content_hash}; scored by {scorer}"
.format(
assessment_id=assessment.id,
submission_uuid=assessment.submission_uuid,
Expand All @@ -937,7 +937,7 @@ def _log_workflow(submission_uuid, workflow):
assessment.
"""
logger.info(
u"Retrieved submission {} ({}, {}) to be assessed by {}"
"Retrieved submission {} ({}, {}) to be assessed by {}"
.format(
submission_uuid,
workflow.course_id,
Expand Down Expand Up @@ -983,11 +983,11 @@ def on_cancel(submission_uuid):
if workflow:
workflow.cancelled_at = timezone.now()
workflow.save()
except (PeerAssessmentWorkflowError, DatabaseError):
except (PeerAssessmentWorkflowError, DatabaseError) as ex:
error_message = (
u"An internal error occurred while cancelling the peer"
u"workflow for submission {}"
"An internal error occurred while cancelling the peer"
"workflow for submission {}"
.format(submission_uuid)
)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message)
raise PeerAssessmentInternalError(error_message) from ex
Loading

0 comments on commit 0d46321

Please sign in to comment.