
Fix errors reported by new flake8 3.7.3
nsoranzo committed Jan 31, 2019
1 parent 498eba2 commit d5a406c
Showing 13 changed files with 50 additions and 49 deletions.
5 changes: 3 additions & 2 deletions cron/add_manual_builds.py
@@ -6,6 +6,7 @@
 Usage:
 python add_manual_builds.py input_file builds.txt chrom_length_dir
 """
+from __future__ import print_function

 import os
 import sys
Expand Down Expand Up @@ -33,11 +34,11 @@ def add_manual_builds(input_file, build_file, chr_dir):
chrs = fields.pop(0).split(",")
except Exception:
chrs = []
print>>build_file_out, build + "\t" + name + " (" + build + ")"
print(build + "\t" + name + " (" + build + ")", file=build_file_out)
if chrs: # create len file if provided chrom lens
chr_len_out = open(os.path.join(chr_dir, build + ".len"), 'w')
for chr in chrs:
print>>chr_len_out, chr.replace("=", "\t")
print(chr.replace("=", "\t"), file=chr_len_out)
chr_len_out.close()
except Exception:
continue
Expand Down
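The hunk above is the commit's only Python 2/3 portability change: `print >>f, ...` is Python 2 statement syntax, while the `__future__` import switches the module to the `print()` function form that both interpreters accept. A minimal sketch of the two forms (the file path is illustrative, not from the commit):

    from __future__ import print_function  # harmless on Python 3, required on Python 2

    build_file_out = open("builds.txt", "w")  # illustrative path
    # Python 2 only (removed above):  print >>build_file_out, "hg38\tHuman (hg38)"
    # Portable (added above):
    print("hg38\tHuman (hg38)", file=build_file_out)
    build_file_out.close()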
18 changes: 9 additions & 9 deletions lib/galaxy/actions/library.py
@@ -126,15 +126,15 @@ def _upload_dataset(self, trans, library_id, folder_id, replace_dataset=None, **
         return output

     def _get_server_dir_uploaded_datasets(self, trans, params, full_dir, import_dir_desc, library_bunch, response_code, message):
-            dir_response = self._get_server_dir_files(params, full_dir, import_dir_desc)
-            files = dir_response[0]
-            if not files:
-                return dir_response
-            uploaded_datasets = []
-            for file in files:
-                name = os.path.basename(file)
-                uploaded_datasets.append(self._make_library_uploaded_dataset(trans, params, name, file, 'server_dir', library_bunch))
-            return uploaded_datasets, 200, None
+        dir_response = self._get_server_dir_files(params, full_dir, import_dir_desc)
+        files = dir_response[0]
+        if not files:
+            return dir_response
+        uploaded_datasets = []
+        for file in files:
+            name = os.path.basename(file)
+            uploaded_datasets.append(self._make_library_uploaded_dataset(trans, params, name, file, 'server_dir', library_bunch))
+        return uploaded_datasets, 200, None

     def _get_server_dir_files(self, params, full_dir, import_dir_desc):
         files = []
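This hunk and most of the remaining ones re-indent method bodies rather than change behaviour: flake8 3.7 bundles pycodestyle 2.5, which added the E117 "over-indented" check, so blocks indented one level deeper than required now fail linting even though Python runs them unchanged. A minimal reproduction (file name illustrative):

    # over_indented.py
    def f():
            return 1  # flake8 3.7+: E117 over-indented

    def g():
        return 1      # standard four-space indent, no warning

Running `flake8 --select=E117 over_indented.py` should flag only the first function.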
2 changes: 1 addition & 1 deletion lib/galaxy/jobs/dynamic_tool_destination.py
@@ -798,7 +798,7 @@ def validate_destination(app, destination, err_message, err_message_contents,
     valid_destination = False
     suggestion = None

-    if destination is 'fail' and err_message is dest_err_tool_rule_dest:  # It's a tool rule that is set to fail. It's valid
+    if destination == 'fail' and err_message is dest_err_tool_rule_dest:  # It's a tool rule that is set to fail. It's valid
         valid_destination = True
     elif app is None:
         if destination in destination_list:
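The `is` to `==` edits in this commit fix flake8's F632 check (use of `is` with a str, bytes, or int literal). `is` tests object identity, not value equality, and two equal strings are only identical when CPython happens to reuse the same object, so the old code worked by accident. A short illustration:

    a = 'fail'
    b = ''.join(['fa', 'il'])  # equal value, built at runtime

    print(a == b)  # True: value equality, which is what the code intends
    print(a is b)  # generally False on CPython: distinct objects; interning is not guaranteed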
6 changes: 3 additions & 3 deletions lib/galaxy/jobs/runners/pulsar.py
@@ -318,9 +318,9 @@ def queue_job(self, job_wrapper):
         self.monitor_job(pulsar_job_state)

     def __needed_features(self, client):
-            return {
-                'remote_metadata': PulsarJobRunner.__remote_metadata(client),
-            }
+        return {
+            'remote_metadata': PulsarJobRunner.__remote_metadata(client),
+        }

     def __prepare_job(self, job_wrapper, job_destination):
         """Build command-line and Pulsar client for this job."""
14 changes: 7 additions & 7 deletions lib/galaxy/managers/jobs.py
@@ -177,13 +177,13 @@ def replace_dataset_ids(path, key, value):
                         d.value == json.dumps(identifier)))
                 used_ids.append(a.dataset_id)
             elif t == 'ldda':
-                    a = aliased(model.JobToInputLibraryDatasetAssociation)
-                    conditions.append(and_(
-                        model.Job.id == a.job_id,
-                        a.name == k,
-                        a.ldda_id == v
-                    ))
-                    used_ids.append(a.ldda_id)
+                a = aliased(model.JobToInputLibraryDatasetAssociation)
+                conditions.append(and_(
+                    model.Job.id == a.job_id,
+                    a.name == k,
+                    a.ldda_id == v
+                ))
+                used_ids.append(a.ldda_id)
             elif t == 'hdca':
                 a = aliased(model.JobToInputDatasetCollectionAssociation)
                 b = aliased(model.HistoryDatasetCollectionAssociation)
2 changes: 1 addition & 1 deletion lib/galaxy/tools/parameters/basic.py
@@ -604,7 +604,7 @@ def visible(self):
         return True

     def to_param_dict_string(self, value, other_values={}):
-        if value is '':
+        if value == '':
             return 'None'
         lst = ['%s%s' % (self.user_ftp_dir, dataset) for dataset in value]
         if self.multiple:
2 changes: 1 addition & 1 deletion lib/galaxy/tools/parameters/meta.py
@@ -50,7 +50,7 @@ def expand_workflow_inputs(inputs):
                 else:
                     if linked_n is None:
                         linked_n = nval
-                    elif linked_n != nval or nval is 0:
+                    elif linked_n != nval or nval == 0:
                         raise exceptions.RequestParameterInvalidException('Failed to match linked batch selections. Please select equal number of data files.')
                     linked.append(value['values'])
                     linked_keys.append((step_id, key))
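`nval is 0` is the integer flavour of the same bug class: CPython caches small integers (roughly -5 through 256), so identity tests against small values usually pass by luck and then fail for larger ones; Python 3.8+ even emits a SyntaxWarning for `is` with a literal. A CPython-specific illustration:

    cached = 5
    big = 1000
    print(int("5") is cached)    # True on CPython: the small-int cache returns the same object
    print(int("1000") is big)    # False on CPython: distinct objects outside the cache
    print(int("1000") == big)    # True: the comparison the code actually wants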
2 changes: 1 addition & 1 deletion lib/galaxy/util/hash_util.py
@@ -31,7 +31,7 @@ def memory_bound_hexdigest(hash_func, path=None, file=None):

     try:
         for block in iter(lambda: file.read(BLOCK_SIZE), b''):
-                hasher.update(block)
+            hasher.update(block)
         return hasher.hexdigest()
     finally:
         file.close()
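Beyond the re-indent, the loop above uses the two-argument form `iter(callable, sentinel)`, which keeps calling the callable until it returns the sentinel (`b''` at end of file), so arbitrarily large files hash in constant memory. A self-contained sketch, assuming SHA-256 and an illustrative block size:

    import hashlib

    BLOCK_SIZE = 128 * 1024  # illustrative chunk size

    def file_hexdigest(path):
        hasher = hashlib.sha256()
        with open(path, 'rb') as f:
            # call f.read(BLOCK_SIZE) repeatedly until it returns the sentinel b''
            for block in iter(lambda: f.read(BLOCK_SIZE), b''):
                hasher.update(block)
        return hasher.hexdigest()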
40 changes: 20 additions & 20 deletions test/api/test_workflows.py
@@ -986,19 +986,19 @@ def test_run_runtime_parameters_after_pause(self):
         assert len([x for x in content.split("\n") if x]) == 2

     def test_run_subworkflow_auto_labels(self):
-            history_id = self.dataset_populator.new_history()
-            test_data = """
+        history_id = self.dataset_populator.new_history()
+        test_data = """
 outer_input:
   value: 1.bed
   type: File
 """
-            job_summary = self._run_jobs(NESTED_WORKFLOW_AUTO_LABELS, test_data=test_data, history_id=history_id)
-            assert len(job_summary.jobs) == 4, "4 jobs expected, got %d jobs" % len(job_summary.jobs)
+        job_summary = self._run_jobs(NESTED_WORKFLOW_AUTO_LABELS, test_data=test_data, history_id=history_id)
+        assert len(job_summary.jobs) == 4, "4 jobs expected, got %d jobs" % len(job_summary.jobs)

-            content = self.dataset_populator.get_history_dataset_content(history_id)
-            self.assertEqual(
-                "chrX\t152691446\t152691471\tCCDS14735.1_cds_0_0_chrX_152691447_f\t0\t+\nchrX\t152691446\t152691471\tCCDS14735.1_cds_0_0_chrX_152691447_f\t0\t+\n",
-                content)
+        content = self.dataset_populator.get_history_dataset_content(history_id)
+        self.assertEqual(
+            "chrX\t152691446\t152691471\tCCDS14735.1_cds_0_0_chrX_152691447_f\t0\t+\nchrX\t152691446\t152691471\tCCDS14735.1_cds_0_0_chrX_152691447_f\t0\t+\n",
+            content)

     @skip_without_tool("cat1")
     @skip_without_tool("collection_paired_test")
@@ -3134,18 +3134,18 @@ def test_invocations_not_accessible_by_different_user_for_published_workflow(sel
         self._assert_status_code_is(usage_details_response, 403)

     def _invoke_paused_workflow(self, history_id):
-            workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_pause")
-            workflow_id = self.workflow_populator.create_workflow(workflow)
-            hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
-            index_map = {
-                '0': self._ds_entry(hda1),
-            }
-            invocation_id = self.__invoke_workflow(
-                history_id,
-                workflow_id,
-                index_map,
-            )
-            return workflow_id, invocation_id
+        workflow = self.workflow_populator.load_workflow_from_resource("test_workflow_pause")
+        workflow_id = self.workflow_populator.create_workflow(workflow)
+        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
+        index_map = {
+            '0': self._ds_entry(hda1),
+        }
+        invocation_id = self.__invoke_workflow(
+            history_id,
+            workflow_id,
+            index_map,
+        )
+        return workflow_id, invocation_id

     def _wait_for_invocation_non_new(self, workflow_id, invocation_id):
         target_state_reached = False
2 changes: 1 addition & 1 deletion test/functional/test_toolbox.py
@@ -82,7 +82,7 @@ def build_tests(app=None,
         # Create a new subclass of ToolTestCase, dynamically adding methods
        # named test_tool_XXX that run each test defined in the tool config.
         if contains and contains not in tool_id:
-                continue
+            continue
         name = name_prefix + tool_id.replace(' ', '_')
         baseclasses = (baseclass, )
         namespace = dict()
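For readers unfamiliar with the pattern named in the comment above (dynamically adding `test_tool_XXX` methods to a generated subclass), here is a minimal hedged sketch; the names and tool ids are illustrative, not Galaxy's actual implementation:

    import unittest

    def make_test(tool_id):
        def test(self):
            self.assertTrue(tool_id)  # stand-in for driving the real tool test
        test.__name__ = "test_tool_%s" % tool_id
        return test

    # Build a namespace of test_* methods, then create the class with type()
    namespace = {"test_tool_%s" % t: make_test(t) for t in ("cat1", "sort1")}
    GeneratedToolTests = type("GeneratedToolTests", (unittest.TestCase,), namespace)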
2 changes: 1 addition & 1 deletion tools/maf/maf_to_bed.py
@@ -66,7 +66,7 @@ def __main__():
         for c in l:
             spec, chrom = maf.src_split(c.src)
             if not spec or not chrom:
-                    spec = chrom = c.src
+                spec = chrom = c.src
             if spec not in out_files.keys():
                 out_files[spec] = open(os.path.join(database_tmp_dir, 'primary_%s_%s_visible_bed_%s' % (output_id, spec, spec)), 'wb+')

2 changes: 1 addition & 1 deletion tools/phenotype_association/senatag.py
@@ -139,7 +139,7 @@ def check_output(g, tagsnps):

     for n in tagsnps:
         for m in n.edges:
-                mysnps.append(m.name)
+            mysnps.append(m.name)

     mysnps = list(set(mysnps))

2 changes: 1 addition & 1 deletion tools/stats/gsummary.py
@@ -22,7 +22,7 @@ def S3_METHODS(all="key"):
                   "acosh", "asinh", "atanh", "lgamma", "gamma", "gammaCody", "digamma", "trigamma",
                   "cumsum", "cumprod", "cummax", "cummin", "c"]
     Group_Ops = ["+", "-", "*", "/", "^", "%%", "%/%", "&", "|", "!", "==", "!=", "<", "<=", ">=", ">", "(", ")", "~", ","]
-    if all is "key":
+    if all == "key":
         return {'Math': Group_Math, 'Ops': Group_Ops}


