#!/usr/bin/env python
from __future__ import print_function
import optparse
import os
import sys
import subprocess

try:
    import django
    from django.conf import settings
    from django.test.utils import get_runner
    # We don't actually need typing, but it's a good guard for being
    # outside a Zulip virtualenv.
    import typing
except ImportError as e:
    print("ImportError: {}".format(e))
    print("You need to run the Zulip tests inside a Zulip dev environment.")
    print("If you are using Vagrant, you can `vagrant ssh` to enter the Vagrant guest.")
    sys.exit(1)

if __name__ == "__main__":
    TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
    # Run from the repository root, so that relative paths like
    # zerver/tests/ resolve correctly.
    os.chdir(os.path.dirname(TOOLS_DIR))
    sys.path.insert(0, os.path.dirname(TOOLS_DIR))

    from zerver.lib.test_fixtures import is_template_database_current
    from tools.lib.test_script import (
        get_provisioning_status,
    )

    os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
    # Force unbuffered IO (the effect of `python -u`), which is important
    # when this script's output is consumed by a wrapping subprocess.
    os.environ['PYTHONUNBUFFERED'] = 'y'

    usage = """%prog [options]
    test-backend # Runs all backend tests
    test-backend zerver.tests.test_bugdown # run all tests in a test module
    test-backend zerver/tests/test_bugdown.py # run all tests in a test module
    test-backend test_bugdown # run all tests in a test module
    test-backend zerver.tests.test_bugdown.BugdownTest # run all tests in a test class
    test-backend BugdownTest # run all tests in a test class
    test-backend zerver.tests.test_bugdown.BugdownTest.test_inline_youtube # run a single test
    test-backend BugdownTest.test_inline_youtube # run a single test"""
    parser = optparse.OptionParser(usage)

    parser.add_option('--nonfatal-errors', action="store_false", default=True,
                      dest="fatal_errors",
                      help="Continue past test failures to run all tests")
    parser.add_option('--coverage', dest='coverage',
                      action="store_true",
                      default=False, help='Compute test coverage.')
    parser.add_option('--no-verbose-coverage', dest='verbose_coverage',
                      action="store_false",
                      default=True, help='Disable verbose printing of the coverage report.')
    parser.add_option('--profile', dest='profile',
                      action="store_true",
                      default=False, help='Profile test runtime.')
    parser.add_option('--force', dest='force',
                      action="store_true",
                      default=False, help='Run tests despite possible problems.')
    parser.add_option('--no-shallow', dest='no_shallow',
                      action="store_true",
                      default=False,
                      help="Don't allow shallow testing of templates (deprecated)")
    parser.add_option('--verbose', dest='verbose',
                      action="store_true",
                      default=False,
                      help="Show detailed output")
    parser.add_option('--no-generate-fixtures', action="store_false", default=True,
                      dest="generate_fixtures",
                      help=("Reduce running time by not calling generate-fixtures. "
                            "This may cause spurious failures for some tests."))
    parser.add_option('--report-slow-tests', dest='report_slow_tests',
                      action="store_true",
                      default=False,
                      help="Show which tests are slowest.")

    (options, args) = parser.parse_args()

    zerver_test_dir = 'zerver/tests/'

    # Transform forward slashes '/' in each argument into dots '.'.
    for suite in args:
        args[args.index(suite)] = suite.rstrip('/').replace("/", ".")
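    # For example (illustrative): 'zerver/tests/test_bugdown.py' becomes
    # 'zerver.tests.test_bugdown.py' here; the stray '.py' suffix is
    # stripped by a later pass below.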

    def rewrite_arguments(search_key):
        # type: (str) -> None
        for root, dirs, files_names in os.walk(zerver_test_dir, topdown=False):
            for file_name in files_names:
                # Check for files starting with an alphanumeric character
                # and ending with '.py'; ignore backup files, if any.
                if not file_name[0].isalnum() or not file_name.endswith(".py"):
                    continue
                filepath = os.path.join(root, file_name)
                with open(filepath) as test_file:
                    for line in test_file:
                        if search_key not in line:
                            continue
                        # `suite` is the current argument from the
                        # `for suite in args` loop below that calls us;
                        # rewrite it in place to a fully qualified path.
                        new_suite = filepath.replace(".py", ".") + suite
                        args[args.index(suite)] = new_suite
                        return

    for suite in args:
        if suite[0].isupper() and "test_" in suite:
            classname = suite.rsplit('.', 1)[0]
            rewrite_arguments(classname)
        elif suite[0].isupper():
            rewrite_arguments(suite)
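    # Illustrative trace (assuming BugdownTest lives in
    # zerver/tests/test_bugdown.py, as in the usage examples above): the
    # argument 'BugdownTest.test_inline_youtube' yields the class name
    # 'BugdownTest', which is found in that file, so the argument becomes
    # 'zerver/tests/test_bugdown.BugdownTest.test_inline_youtube'; the
    # remaining slashes are converted to dots further below.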

    for suite in args:
        if suite.startswith('test'):
            for root, dirs, files_names in os.walk(zerver_test_dir):
                for file_name in files_names:
                    if file_name == suite or file_name == suite + ".py":
                        new_suite = os.path.join(root, file_name)
                        args[args.index(suite)] = new_suite
                        break

    for suite in args:
        args[args.index(suite)] = suite.replace(".py", "")

    # Transform any forward slashes '/' introduced by joining paths with
    # zerver_test_dir into dots '.'.
    for suite in args:
        args[args.index(suite)] = suite.replace("/", ".")
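    # At this point every argument should be a dotted suite path the test
    # runner understands, e.g. 'zerver.tests.test_bugdown.BugdownTest'
    # (example assumes the module layout shown in the usage string above).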

    full_suite = len(args) == 0

    if len(args) == 0:
        suites = ["zerver.tests",
                  "analytics.tests"]
    else:
        suites = args

    if not options.force:
        ok, msg = get_provisioning_status()
        if not ok:
            print(msg)
            print('If you really know what you are doing, use --force to run anyway.')
            sys.exit(1)

    if options.coverage:
        import coverage
        cov = coverage.Coverage(omit=["*/zulip-venv-cache/*",
                                      "*/migrations/*",
                                      "*/management/commands/*"])
        cov.start()
    if options.profile:
        import cProfile
        prof = cProfile.Profile()
        prof.enable()

    # This is kind of hacky, but it's the most reliable way
    # to make sure instrumentation decorators know the
    # setting when they run.
    os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'

    # setup() needs to be called after coverage is started to get proper
    # coverage reports of model files, since part of setup is importing
    # the models for all applications in INSTALLED_APPS.
    django.setup()

    if options.generate_fixtures:
        generate_fixtures_command = [os.path.join(TOOLS_DIR, 'setup', 'generate-fixtures')]
        if not is_template_database_current():
            generate_fixtures_command.append('--force')
        subprocess.call(generate_fixtures_command)

    TestRunner = get_runner(settings)
    test_runner = TestRunner()
    failures = test_runner.run_tests(suites, fatal_errors=options.fatal_errors,
                                     full_suite=full_suite)
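    # Note: Django test runners conventionally return the number of failed
    # tests from run_tests(), so `failures` is used below as a truthy flag
    # (and may be overwritten with True by the template check).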

    templates_not_rendered = test_runner.get_shallow_tested_templates()
    if templates_not_rendered and full_suite:
        missed_count = len(templates_not_rendered)
        print("\nError: %s templates have no tests!" % (missed_count,))
        for template in templates_not_rendered:
            print('  {}'.format(template))
        print("See zerver/tests/test_templates.py for the exclude list.")
        failures = True

    if options.coverage:
        cov.stop()
        cov.save()
        if options.verbose_coverage:
            print("Printing coverage data")
            cov.report(show_missing=False)
        cov.html_report(directory='var/coverage')
        print("HTML report saved to var/coverage")
    if options.profile:
        prof.disable()
        prof.dump_stats("/tmp/profile.data")
        print("Profile data saved to /tmp/profile.data")
        print("You can visualize it using e.g. `runsnake /tmp/profile.data`")

    if options.report_slow_tests:
        from zerver.lib.test_runner import report_slow_tests
        # We do this even with failures, since slowness can be
        # an important clue as to why tests fail.
        report_slow_tests()

    if failures:
        print('FAILED!')
    else:
        print('DONE!')
    sys.exit(bool(failures))