diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index 6d46eaeb3..1ce1ca378 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -3,3 +3,6 @@
 
 # upgrade to black 20.8b1
 6fd6fdb381fe3f347627fd517a8f2dba7b0a7029
+
+# upgrade to black 23.1, longer lines (100)
+7fe8c0739b0515d00daabc7db87bc5fad926d345
diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml
index 47df6cbf9..5d552bda0 100644
--- a/.github/workflows/ci-tests.yml
+++ b/.github/workflows/ci-tests.yml
@@ -25,6 +25,9 @@ jobs:
           - py-ver-major: 3
             py-ver-minor: 6
             step: mypy
+          - py-ver-major: 3
+            py-ver-minor: 6
+            step: lint
 
     env:
       py-semver: ${{ format('{0}.{1}', matrix.py-ver-major, matrix.py-ver-minor) }}
diff --git a/cwltool.Dockerfile b/cwltool.Dockerfile
index ecfbd9e22..0193b9f32 100644
--- a/cwltool.Dockerfile
+++ b/cwltool.Dockerfile
@@ -4,14 +4,15 @@ RUN apk add --no-cache git gcc python3-dev libxml2-dev libxslt-dev libc-dev linu
 
 WORKDIR /cwltool
 COPY . .
-
-RUN pip install toml -rmypy-requirements.txt -rrequirements.txt
-RUN CWLTOOL_USE_MYPYC=1 MYPYPATH=mypy-stubs pip wheel --no-binary schema-salad --wheel-dir=/wheels .[deps]
+RUN CWLTOOL_USE_MYPYC=1 MYPYPATH=mypy-stubs pip wheel --no-binary schema-salad \
+    --wheel-dir=/wheels .[deps]  # --verbose
 RUN rm /wheels/schema_salad*
-RUN pip install black
+RUN pip install "black~=22.0"
+# galaxy-util 22.1.2 depends on packaging<22, but black 23.x needs packaging>22
 RUN SCHEMA_SALAD_USE_MYPYC=1 MYPYPATH=mypy-stubs pip wheel --no-binary schema-salad \
-    $(grep schema.salad requirements.txt) black --wheel-dir=/wheels
-RUN pip install --force-reinstall --no-index --no-warn-script-location --root=/pythonroot/ /wheels/*.whl
+    $(grep schema.salad requirements.txt) "black~=22.0" --wheel-dir=/wheels  # --verbose
+RUN pip install --force-reinstall --no-index --no-warn-script-location \
+    --root=/pythonroot/ /wheels/*.whl
 # --force-reinstall to install our new mypyc compiled schema-salad package
 
 FROM python:3.11-alpine as module
diff --git a/cwltool/argparser.py b/cwltool/argparser.py
index a7e58c463..5439d2bd2 100644
--- a/cwltool/argparser.py
+++ b/cwltool/argparser.py
@@ -126,9 +126,7 @@ def arg_parser() -> argparse.ArgumentParser:
         help="Path prefix for temporary directories. If --tmpdir-prefix is not "
         "provided, then the prefix for temporary directories is influenced by "
         "the value of the TMPDIR, TEMP, or TMP environment variables. Taking "
-        "those into consideration, the current default is {}.".format(
-            DEFAULT_TMP_PREFIX
-        ),
+        "those into consideration, the current default is {}.".format(DEFAULT_TMP_PREFIX),
         default=DEFAULT_TMP_PREFIX,
     )
 
@@ -318,12 +316,8 @@ def arg_parser() -> argparse.ArgumentParser:
         action="store_true",
         help="Combine components into single document and print.",
     )
-    printgroup.add_argument(
-        "--version", action="store_true", help="Print version and exit"
-    )
-    printgroup.add_argument(
-        "--validate", action="store_true", help="Validate CWL document only."
-    )
+    printgroup.add_argument("--version", action="store_true", help="Print version and exit")
+    printgroup.add_argument("--validate", action="store_true", help="Validate CWL document only.")
     printgroup.add_argument(
         "--print-supported-versions",
         action="store_true",
@@ -384,12 +378,8 @@ def arg_parser() -> argparse.ArgumentParser:
 
     volumegroup = parser.add_mutually_exclusive_group()
     volumegroup.add_argument("--verbose", action="store_true", help="Default logging")
-    volumegroup.add_argument(
-        "--quiet", action="store_true", help="Only print warnings and errors."
-    )
-    volumegroup.add_argument(
-        "--debug", action="store_true", help="Print even more logging"
-    )
+    volumegroup.add_argument("--quiet", action="store_true", help="Only print warnings and errors.")
+    volumegroup.add_argument("--debug", action="store_true", help="Print even more logging")
 
     parser.add_argument(
         "--write-summary",
@@ -494,12 +484,9 @@ def arg_parser() -> argparse.ArgumentParser:
         "Default root directory used by dependency resolvers configuration."
     )
     use_biocontainers_help = (
-        "Use biocontainers for tools without an "
-        "explicitly annotated Docker container."
-    )
-    conda_dependencies = (
-        "Short cut to use Conda to resolve 'SoftwareRequirement' packages."
+        "Use biocontainers for tools without an " "explicitly annotated Docker container."
     )
+    conda_dependencies = "Short cut to use Conda to resolve 'SoftwareRequirement' packages."
 
     parser.add_argument(
         "--beta-dependency-resolvers-configuration",
@@ -522,9 +509,7 @@ def arg_parser() -> argparse.ArgumentParser:
         action="store_true",
     )
 
-    parser.add_argument(
-        "--tool-help", action="store_true", help="Print command line help for tool"
-    )
+    parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")
 
     parser.add_argument(
         "--relative-deps",
@@ -537,8 +522,7 @@ def arg_parser() -> argparse.ArgumentParser:
     parser.add_argument(
         "--enable-dev",
         action="store_true",
-        help="Enable loading and running unofficial development versions of "
-        "the CWL standards.",
+        help="Enable loading and running unofficial development versions of " "the CWL standards.",
         default=False,
     )
 
@@ -647,8 +631,7 @@ def arg_parser() -> argparse.ArgumentParser:
         "--relax-path-checks",
         action="store_true",
         default=False,
-        help="Relax requirements on path names to permit "
-        "spaces and hash characters.",
+        help="Relax requirements on path names to permit " "spaces and hash characters.",
         dest="relax_path_checks",
     )
 
@@ -933,9 +916,7 @@ def add_argument(
             fieldtype,
             records,
             fielddescription,
-            default=default.get(shortname(field["name"]), None)
-            if default
-            else None,
+            default=default.get(shortname(field["name"]), None) if default else None,
             input_required=required,
         )
     return
diff --git a/cwltool/builder.py b/cwltool/builder.py
index 3fc4a34af..348700e6d 100644
--- a/cwltool/builder.py
+++ b/cwltool/builder.py
@@ -191,9 +191,7 @@ def bind_input(
         bindings: List[MutableMapping[str, Union[str, List[int]]]] = []
         binding: Union[MutableMapping[str, Union[str, List[int]]], CommentedMap] = {}
         value_from_expression = False
-        if "inputBinding" in schema and isinstance(
-            schema["inputBinding"], MutableMapping
-        ):
+        if "inputBinding" in schema and isinstance(schema["inputBinding"], MutableMapping):
             binding = CommentedMap(schema["inputBinding"].items())
 
         bp = list(aslist(lead_pos))
@@ -303,8 +301,7 @@ def bind_input(
                     else:
                         schema["type"] = "record"
                         schema["fields"] = [
-                            {"name": field_name, "type": "Any"}
-                            for field_name in datum.keys()
+                            {"name": field_name, "type": "Any"} for field_name in datum.keys()
                         ]
                 elif isinstance(datum, list):
                     schema["type"] = "array"
@@ -378,14 +375,10 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
                         debug,
                     ):
                         try:
-                            with self.fs_access.open(
-                                cast(str, datum["location"]), "rb"
-                            ) as f2:
+                            with self.fs_access.open(cast(str, datum["location"]), "rb") as f2:
                                 datum["contents"] = content_limit_respected_read(f2)
                         except Exception as e:
-                            raise Exception(
-                                "Reading {}\n{}".format(datum["location"], e)
-                            ) from e
+                            raise Exception("Reading {}\n{}".format(datum["location"], e)) from e
 
                 if "secondaryFiles" in
schema: if "secondaryFiles" not in datum: @@ -398,13 +391,8 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType: for num, sf_entry in enumerate(sf_schema): if "required" in sf_entry and sf_entry["required"] is not None: - required_result = self.do_eval( - sf_entry["required"], context=datum - ) - if not ( - isinstance(required_result, bool) - or required_result is None - ): + required_result = self.do_eval(sf_entry["required"], context=datum) + if not (isinstance(required_result, bool) or required_result is None): if sf_schema == schema["secondaryFiles"]: sf_item: Any = sf_schema[num] else: @@ -425,9 +413,7 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType: if "$(" in sf_entry["pattern"] or "${" in sf_entry["pattern"]: sfpath = self.do_eval(sf_entry["pattern"], context=datum) else: - sfpath = substitute( - cast(str, datum["basename"]), sf_entry["pattern"] - ) + sfpath = substitute(cast(str, datum["basename"]), sf_entry["pattern"]) for sfname in aslist(sfpath): if not sfname: @@ -438,8 +424,7 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType: d_location = cast(str, datum["location"]) if "/" in d_location: sf_location = ( - d_location[0 : d_location.rindex("/") + 1] - + sfname + d_location[0 : d_location.rindex("/") + 1] + sfname ) else: sf_location = d_location + sfname @@ -462,9 +447,7 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType: datum["secondaryFiles"], ): if not d.get("basename"): - d["basename"] = d["location"][ - d["location"].rindex("/") + 1 : - ] + d["basename"] = d["location"][d["location"].rindex("/") + 1 :] if d["basename"] == sfbasename: found = True @@ -488,9 +471,7 @@ def addsf( ), sfname, ) - elif discover_secondaryFiles and self.fs_access.exists( - sf_location - ): + elif discover_secondaryFiles and self.fs_access.exists(sf_location): addsf( cast( MutableSequence[CWLObjectType], @@ -550,9 +531,7 @@ def addsf( ).makeError(message) evaluated_format = cast(List[str], eval_format) else: - raise SourceLine( - schema, "format", WorkflowException, debug - ).makeError( + raise SourceLine(schema, "format", WorkflowException, debug).makeError( "An expression in the 'format' field must " "evaluate to a string, or list of strings. 
" "However the type of the expression result was " @@ -642,9 +621,7 @@ def generate_arg(self, binding: CWLObjectType) -> List[str]: WorkflowException, debug, ): - raise WorkflowException( - "'separate' option can not be specified without prefix" - ) + raise WorkflowException("'separate' option can not be specified without prefix") argl: MutableSequence[CWLOutputType] = [] if isinstance(value, MutableSequence): @@ -653,9 +630,7 @@ def generate_arg(self, binding: CWLObjectType) -> List[str]: argl = [itemSeparator.join([self.tostr(v) for v in value])] elif binding.get("valueFrom"): value = [self.tostr(v) for v in value] - return cast(List[str], ([prefix] if prefix else [])) + cast( - List[str], value - ) + return cast(List[str], ([prefix] if prefix else [])) + cast(List[str], value) elif prefix and value: return [prefix] else: diff --git a/cwltool/checker.py b/cwltool/checker.py index 5e25e948b..7184c2666 100644 --- a/cwltool/checker.py +++ b/cwltool/checker.py @@ -59,9 +59,7 @@ def check_types( None, ) if linkMerge == "merge_flattened": - return check_types( - merge_flatten_type(_get_type(srctype)), _get_type(sinktype), None, None - ) + return check_types(merge_flatten_type(_get_type(srctype)), _get_type(sinktype), None, None) raise WorkflowException(f"Unrecognized linkMerge enum {linkMerge!r}") @@ -74,9 +72,7 @@ def merge_flatten_type(src: SinkType) -> CWLOutputType: return {"items": src, "type": "array"} -def can_assign_src_to_sink( - src: SinkType, sink: Optional[SinkType], strict: bool = False -) -> bool: +def can_assign_src_to_sink(src: SinkType, sink: Optional[SinkType], strict: bool = False) -> bool: """ Check for identical type specifications, ignoring extra keys like inputBinding. @@ -104,9 +100,7 @@ def can_assign_src_to_sink( for sinksf in cast(List[CWLObjectType], sink.get("secondaryFiles", [])): if not [ 1 - for srcsf in cast( - List[CWLObjectType], src.get("secondaryFiles", []) - ) + for srcsf in cast(List[CWLObjectType], src.get("secondaryFiles", [])) if sinksf == srcsf ]: if strict: @@ -122,9 +116,7 @@ def can_assign_src_to_sink( return False return True for this_src in src: - if this_src != "null" and can_assign_src_to_sink( - cast(SinkType, this_src), sink - ): + if this_src != "null" and can_assign_src_to_sink(cast(SinkType, this_src), sink): return True return False if isinstance(sink, MutableSequence): @@ -135,9 +127,7 @@ def can_assign_src_to_sink( return bool(src == sink) -def _compare_records( - src: CWLObjectType, sink: CWLObjectType, strict: bool = False -) -> bool: +def _compare_records(src: CWLObjectType, sink: CWLObjectType, strict: bool = False) -> bool: """ Compare two records, ensuring they have compatible fields. @@ -219,9 +209,7 @@ def static_checker( sink = warning.sink linkMerge = warning.linkMerge sinksf = sorted( - p["pattern"] - for p in sink.get("secondaryFiles", []) - if p.get("required", True) + p["pattern"] for p in sink.get("secondaryFiles", []) if p.get("required", True) ) srcsf = sorted(p["pattern"] for p in src.get("secondaryFiles", [])) # Every secondaryFile required by the sink, should be declared @@ -233,16 +221,13 @@ def static_checker( missing, ) msg3 = SourceLine(src, "id").makeError( - "source '%s' does not provide those secondaryFiles." - % (shortname(src["id"])) + "source '%s' does not provide those secondaryFiles." 
% (shortname(src["id"])) ) msg4 = SourceLine(src.get("_tool_entry", src), "secondaryFiles").makeError( "To resolve, add missing secondaryFiles patterns to definition of '%s' or" % (shortname(src["id"])) ) - msg5 = SourceLine( - sink.get("_tool_entry", sink), "secondaryFiles" - ).makeError( + msg5 = SourceLine(sink.get("_tool_entry", sink), "secondaryFiles").makeError( "mark missing secondaryFiles in definition of '%s' as optional." % shortname(sink["id"]) ) @@ -303,17 +288,14 @@ def static_checker( ) + "\n" + SourceLine(sink, "type").makeError( - " with sink '%s' of type %s" - % (shortname(sink["id"]), json_dumps(sink["type"])) + " with sink '%s' of type %s" % (shortname(sink["id"]), json_dumps(sink["type"])) ) ) if extra_message is not None: msg += "\n" + SourceLine(sink).makeError(" " + extra_message) if linkMerge is not None: - msg += "\n" + SourceLine(sink).makeError( - " source has linkMerge method %s" % linkMerge - ) + msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge) exception_msgs.append(msg) for sink in step_inputs: @@ -358,7 +340,6 @@ def check_all_types( validation = {"warning": [], "exception": []} # type: Dict[str, List[SrcSink]] for sink in sinks: if sourceField in sink: - valueFrom = cast(Optional[str], sink.get("valueFrom")) pickValue = cast(Optional[str], sink.get("pickValue")) @@ -371,11 +352,7 @@ def check_all_types( Optional[str], sink.get( "linkMerge", - ( - "merge_nested" - if len(cast(Sized, sink[sourceField])) > 1 - else None - ), + ("merge_nested" if len(cast(Sized, sink[sourceField])) > 1 else None), ), ) # type: Optional[str] @@ -385,10 +362,7 @@ def check_all_types( srcs_of_sink = [] # type: List[CWLObjectType] for parm_id in cast(MutableSequence[str], sink[sourceField]): srcs_of_sink += [src_dict[parm_id]] - if ( - is_conditional_step(param_to_step, parm_id) - and pickValue is None - ): + if is_conditional_step(param_to_step, parm_id) and pickValue is None: validation["warning"].append( SrcSink( src_dict[parm_id], @@ -490,9 +464,7 @@ def get_dependency_tree(step_inputs: List[CWLObjectType]) -> Dict[str, List[str] for step_input in step_inputs: if "source" in step_input: if isinstance(step_input["source"], list): - vertices_in = [ - get_step_id(cast(str, src)) for src in step_input["source"] - ] + vertices_in = [get_step_id(cast(str, src)) for src in step_input["source"]] else: vertices_in = [get_step_id(cast(str, step_input["source"]))] vertex_out = get_step_id(cast(str, step_input["id"])) @@ -542,9 +514,7 @@ def is_conditional_step(param_to_step: Dict[str, CWLObjectType], parm_id: str) - return False -def is_all_output_method_loop_step( - param_to_step: Dict[str, CWLObjectType], parm_id: str -) -> bool: +def is_all_output_method_loop_step(param_to_step: Dict[str, CWLObjectType], parm_id: str) -> bool: """Check if a step contains a http://commonwl.org/cwltool#Loop requirement with `all` outputMethod.""" source_step: Optional[MutableMapping[str, Any]] = param_to_step.get(parm_id) if source_step is not None: diff --git a/cwltool/command_line_tool.py b/cwltool/command_line_tool.py index 898a5a842..901ad0436 100644 --- a/cwltool/command_line_tool.py +++ b/cwltool/command_line_tool.py @@ -243,9 +243,7 @@ def remove_path(f): # type: (CWLObjectType) -> None del f["path"] -def revmap_file( - builder: Builder, outdir: str, f: CWLObjectType -) -> Optional[CWLObjectType]: +def revmap_file(builder: Builder, outdir: str, f: CWLObjectType) -> Optional[CWLObjectType]: """ Remap a file from internal path to external path. 
@@ -288,9 +286,7 @@ def revmap_file( ) revmap_f = builder.pathmapper.reversemap(path) - if revmap_f and not builder.pathmapper.mapper(revmap_f[0]).type.startswith( - "Writable" - ): + if revmap_f and not builder.pathmapper.mapper(revmap_f[0]).type.startswith("Writable"): f["location"] = revmap_f[1] elif ( uripath == outdir @@ -353,9 +349,7 @@ def run( ) -def check_adjust( - accept_re: Pattern[str], builder: Builder, file_o: CWLObjectType -) -> CWLObjectType: +def check_adjust(accept_re: Pattern[str], builder: Builder, file_o: CWLObjectType) -> CWLObjectType: """ Map files to assigned path inside a container. @@ -363,9 +357,7 @@ def check_adjust( doesn't reach everything in builder.bindings """ if not builder.pathmapper: - raise ValueError( - "Do not call check_adjust using a builder that doesn't have a pathmapper." - ) + raise ValueError("Do not call check_adjust using a builder that doesn't have a pathmapper.") file_o["path"] = path = builder.pathmapper.mapper(cast(str, file_o["location"]))[1] basename = cast(str, file_o.get("basename")) dn, bn = os.path.split(path) @@ -393,9 +385,7 @@ def check_valid_locations(fs_access: StdFsAccess, ob: CWLObjectType) -> None: if ob["class"] == "File" and not fs_access.isfile(location): raise ValidationException("Does not exist or is not a File: '%s'" % location) if ob["class"] == "Directory" and not fs_access.isdir(location): - raise ValidationException( - "Does not exist or is not a Directory: '%s'" % location - ) + raise ValidationException("Does not exist or is not a Directory: '%s'" % location) OutputPortsType = Dict[str, Optional[CWLOutputType]] @@ -412,9 +402,7 @@ def __init__(self, msg: str, port: CWLObjectType, **kwargs: Any) -> None: class CommandLineTool(Process): - def __init__( - self, toolpath_object: CommentedMap, loadingContext: LoadingContext - ) -> None: + def __init__(self, toolpath_object: CommentedMap, loadingContext: LoadingContext) -> None: """Initialize this CommandLineTool.""" super().__init__(toolpath_object, loadingContext) self.prov_obj = loadingContext.prov_obj @@ -460,18 +448,14 @@ def make_job_runner(self, runtimeContext: RuntimeContext) -> Type[JobBase]: _logger.warning( "MPI has been required while Docker is hinted, discarding Docker hint(s)" ) - self.hints = [ - h for h in self.hints if h["class"] != "DockerRequirement" - ] + self.hints = [h for h in self.hints if h["class"] != "DockerRequirement"] return CommandLineJob else: if dockerRequired: _logger.warning( "Docker has been required while MPI is hinted, discarding MPI hint(s)" ) - self.hints = [ - h for h in self.hints if h["class"] != MPIRequirementName - ] + self.hints = [h for h in self.hints if h["class"] != MPIRequirementName] else: raise UnsupportedRequirement( "Both Docker and MPI have been hinted - don't know what to do" @@ -495,13 +479,10 @@ def make_path_mapper( ) -> PathMapper: return PathMapper(reffiles, runtimeContext.basedir, stagedir, separateDirs) - def updatePathmap( - self, outdir: str, pathmap: PathMapper, fn: CWLObjectType - ) -> None: + def updatePathmap(self, outdir: str, pathmap: PathMapper, fn: CWLObjectType) -> None: + """Update a PathMapper with a CWL File or Directory object.""" if not isinstance(fn, MutableMapping): - raise WorkflowException( - "Expected File or Directory object, was %s" % type(fn) - ) + raise WorkflowException("Expected File or Directory object, was %s" % type(fn)) basename = cast(str, fn["basename"]) if "location" in fn: location = cast(str, fn["location"]) @@ -516,9 +497,7 @@ def updatePathmap( for sf in 
cast(List[CWLObjectType], fn.get("secondaryFiles", [])): self.updatePathmap(outdir, pathmap, sf) for ls in cast(List[CWLObjectType], fn.get("listing", [])): - self.updatePathmap( - os.path.join(outdir, cast(str, fn["basename"])), pathmap, ls - ) + self.updatePathmap(os.path.join(outdir, cast(str, fn["basename"])), pathmap, ls) def _initialworkdir(self, j: JobBase, builder: Builder) -> None: initialWorkdir, _ = self.get_requirement("InitialWorkDirRequirement") @@ -544,9 +523,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: if not isinstance(ls_evaluated, MutableSequence): fail = ls_evaluated else: - ls_evaluated2 = cast( - MutableSequence[Union[None, CWLOutputType]], ls_evaluated - ) + ls_evaluated2 = cast(MutableSequence[Union[None, CWLOutputType]], ls_evaluated) for entry in ls_evaluated2: if entry == None: # noqa if classic_dirent: @@ -602,9 +579,9 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: message += "; null; or arrays of File or Directory objects. " message += f"Got {fail!r} among the results from " message += f"{initialWorkdir['listing'].strip()!r}." + fail_suffix - raise SourceLine( - initialWorkdir, "listing", WorkflowException, debug - ).makeError(message) + raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError( + message + ) ls = cast(List[CWLObjectType], ls_evaluated) else: # "listing" is an array of either expressions or Dirent so @@ -623,9 +600,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: if isinstance(entry, MutableSequence): if classic_listing: - raise SourceLine( - t, "entry", WorkflowException, debug - ).makeError( + raise SourceLine(t, "entry", WorkflowException, debug).makeError( "'entry' expressions are not allowed to evaluate " "to an array of Files or Directories until CWL " "v1.2. Consider using 'cwl-upgrader' to upgrade " @@ -637,9 +612,10 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: filelist = True for e in entry: - if not isinstance(e, MutableMapping) or e.get( - "class" - ) not in ("File", "Directory"): + if not isinstance(e, MutableMapping) or e.get("class") not in ( + "File", + "Directory", + ): filelist = False break @@ -667,9 +643,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: et["entry"] = entry else: if classic_dirent: - raise SourceLine( - t, "entry", WorkflowException, debug - ).makeError( + raise SourceLine(t, "entry", WorkflowException, debug).makeError( "'entry' expression resulted in " "something other than number, object or " "array besides a single File or Dirent object. 
" @@ -713,11 +687,8 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: for i, t2 in enumerate(ls): if not isinstance(t2, Mapping): - raise SourceLine( - initialWorkdir, "listing", WorkflowException, debug - ).makeError( - "Entry at index %s of listing is not a record, was %s" - % (i, type(t2)) + raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError( + "Entry at index %s of listing is not a record, was %s" % (i, type(t2)) ) if "entry" not in t2: @@ -726,9 +697,9 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: # Dirent if isinstance(t2["entry"], str): if not t2["entryname"]: - raise SourceLine( - initialWorkdir, "listing", WorkflowException, debug - ).makeError("Entry at index %s of listing missing entryname" % (i)) + raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError( + "Entry at index %s of listing missing entryname" % (i) + ) ls[i] = { "class": "File", "basename": t2["entryname"], @@ -738,17 +709,12 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: continue if not isinstance(t2["entry"], Mapping): - raise SourceLine( - initialWorkdir, "listing", WorkflowException, debug - ).makeError( - "Entry at index %s of listing is not a record, was %s" - % (i, type(t2["entry"])) + raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError( + "Entry at index %s of listing is not a record, was %s" % (i, type(t2["entry"])) ) if t2["entry"].get("class") not in ("File", "Directory"): - raise SourceLine( - initialWorkdir, "listing", WorkflowException, debug - ).makeError( + raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError( "Entry at index %s of listing is not a File or Directory object, was %s" % (i, t2) ) @@ -765,9 +731,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: for i, t3 in enumerate(ls): if t3.get("class") not in ("File", "Directory"): # Check that every item is a File or Directory object now - raise SourceLine( - initialWorkdir, "listing", WorkflowException, debug - ).makeError( + raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError( f"Entry at index {i} of listing is not a Dirent, File or " f"Directory object, was {t2}." ) @@ -776,20 +740,16 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: basename = os.path.normpath(cast(str, t3["basename"])) t3["basename"] = basename if basename.startswith("../"): - raise SourceLine( - initialWorkdir, "listing", WorkflowException, debug - ).makeError( + raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError( f"Name {basename!r} at index {i} of listing is invalid, " "cannot start with '../'" ) if basename.startswith("/"): # only if DockerRequirement in requirements - if cwl_version and ORDERED_VERSIONS.index( - cwl_version - ) < ORDERED_VERSIONS.index("v1.2.0-dev4"): - raise SourceLine( - initialWorkdir, "listing", WorkflowException, debug - ).makeError( + if cwl_version and ORDERED_VERSIONS.index(cwl_version) < ORDERED_VERSIONS.index( + "v1.2.0-dev4" + ): + raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError( f"Name {basename!r} at index {i} of listing is invalid, " "paths starting with '/' are only permitted in CWL 1.2 " "and later. 
Consider changing the absolute path to a relative " @@ -799,9 +759,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: req, is_req = self.get_requirement("DockerRequirement") if is_req is not True: - raise SourceLine( - initialWorkdir, "listing", WorkflowException, debug - ).makeError( + raise SourceLine(initialWorkdir, "listing", WorkflowException, debug).makeError( f"Name {basename!r} at index {i} of listing is invalid, " "name can only start with '/' when DockerRequirement " "is in 'requirements'." @@ -812,9 +770,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None: for entry in ls: if "basename" in entry: basename = cast(str, entry["basename"]) - entry["dirname"] = os.path.join( - builder.outdir, os.path.dirname(basename) - ) + entry["dirname"] = os.path.join(builder.outdir, os.path.dirname(basename)) entry["basename"] = os.path.basename(basename) normalizeFilesDirs(entry) self.updatePathmap( @@ -846,13 +802,10 @@ def job( output_callbacks: Optional[OutputCallbackType], runtimeContext: RuntimeContext, ) -> Generator[Union[JobBase, CallbackJob], None, None]: - workReuse, _ = self.get_requirement("WorkReuse") enableReuse = workReuse.get("enableReuse", True) if workReuse else True - jobname = uniquename( - runtimeContext.name or shortname(self.tool.get("id", "job")) - ) + jobname = uniquename(runtimeContext.name or shortname(self.tool.get("id", "job"))) if runtimeContext.cachedir and enableReuse: cachecontext = runtimeContext.copy() cachecontext.outdir = "/out" @@ -865,9 +818,7 @@ def job( cachebuilder.stagedir, separateDirs=False, ) - _check_adjust = partial( - check_adjust, self.path_check_mode.value, cachebuilder - ) + _check_adjust = partial(check_adjust, self.path_check_mode.value, cachebuilder) _checksum = partial( compute_checksums, runtimeContext.make_fs_access(runtimeContext.basedir), @@ -877,22 +828,13 @@ def job( ("File", "Directory"), _check_adjust, ) - visit_class( - [cachebuilder.files, cachebuilder.bindings], ("File"), _checksum - ) + visit_class([cachebuilder.files, cachebuilder.bindings], ("File"), _checksum) - cmdline = flatten( - list(map(cachebuilder.generate_arg, cachebuilder.bindings)) - ) + cmdline = flatten(list(map(cachebuilder.generate_arg, cachebuilder.bindings))) docker_req, _ = self.get_requirement("DockerRequirement") if docker_req is not None and runtimeContext.use_container: - dockerimg = docker_req.get("dockerImageId") or docker_req.get( - "dockerPull" - ) - elif ( - runtimeContext.default_container is not None - and runtimeContext.use_container - ): + dockerimg = docker_req.get("dockerImageId") or docker_req.get("dockerPull") + elif runtimeContext.default_container is not None and runtimeContext.use_container: dockerimg = runtimeContext.default_container else: dockerimg = None @@ -953,9 +895,7 @@ def remove_prefix(s: str, prefix: str) -> str: keydictstr = json_dumps(keydict, separators=(",", ":"), sort_keys=True) cachekey = hashlib.md5(keydictstr.encode("utf-8")).hexdigest() # nosec - _logger.debug( - "[job %s] keydictstr is %s -> %s", jobname, keydictstr, cachekey - ) + _logger.debug("[job %s] keydictstr is %s -> %s", jobname, keydictstr, cachekey) jobcache = os.path.join(runtimeContext.cachedir, cachekey) @@ -975,9 +915,7 @@ def remove_prefix(s: str, prefix: str) -> str: if os.path.isdir(jobcache) and jobstatus == "success": if docker_req and runtimeContext.use_container: - cachebuilder.outdir = ( - runtimeContext.docker_outdir or random_outdir() - ) + cachebuilder.outdir = runtimeContext.docker_outdir or 
random_outdir() else: cachebuilder.outdir = jobcache @@ -987,9 +925,7 @@ def remove_prefix(s: str, prefix: str) -> str: jobcachelock.close() return else: - _logger.info( - "[job %s] Output of job will be cached in %s", jobname, jobcache - ) + _logger.info("[job %s] Output of job will be cached in %s", jobname, jobcache) # turn shared lock into an exclusive lock since we'll # be writing the cache directory @@ -1042,22 +978,16 @@ def update_status_output_callback( "[job %s] initializing from %s%s", j.name, self.tool.get("id", ""), - " as part of %s" % runtimeContext.part_of - if runtimeContext.part_of - else "", + " as part of %s" % runtimeContext.part_of if runtimeContext.part_of else "", ) _logger.debug("[job %s] %s", j.name, json_dumps(builder.job, indent=4)) - builder.pathmapper = self.make_path_mapper( - reffiles, builder.stagedir, runtimeContext, True - ) + builder.pathmapper = self.make_path_mapper(reffiles, builder.stagedir, runtimeContext, True) builder.requirements = j.requirements _check_adjust = partial(check_adjust, self.path_check_mode.value, builder) - visit_class( - [builder.files, builder.bindings], ("File", "Directory"), _check_adjust - ) + visit_class([builder.files, builder.bindings], ("File", "Directory"), _check_adjust) self._initialworkdir(j, builder) @@ -1066,10 +996,7 @@ def update_status_output_callback( "[job %s] path mappings is %s", j.name, json_dumps( - { - p: builder.pathmapper.mapper(p) - for p in builder.pathmapper.files() - }, + {p: builder.pathmapper.mapper(p) for p in builder.pathmapper.files()}, indent=4, ), ) @@ -1213,9 +1140,7 @@ def register_reader(f: CWLObjectType) -> None: if "${" in env_value_field or "$(" in env_value_field: env_value_eval = builder.do_eval(env_value_field) if not isinstance(env_value_eval, str): - raise SourceLine( - evr["envDef"], eindex, WorkflowException, debug - ).makeError( + raise SourceLine(evr["envDef"], eindex, WorkflowException, debug).makeError( "'envValue expression must evaluate to a str. " f"Got {env_value_eval!r} for expression {env_value_field!r}." ) @@ -1259,9 +1184,7 @@ def register_reader(f: CWLObjectType) -> None: if isinstance(np, str): np_eval = builder.do_eval(np) if not isinstance(np_eval, int): - raise SourceLine( - mpi, "processes", WorkflowException, debug - ).makeError( + raise SourceLine(mpi, "processes", WorkflowException, debug).makeError( f"{MPIRequirementName} needs 'processes' expression to " f"evaluate to an int, got {np_eval!r} for expression {np!r}." ) @@ -1298,7 +1221,6 @@ def collect_output_ports( ) else: for i, port in enumerate(ports): - with SourceLine( ports, i, @@ -1327,9 +1249,7 @@ def collect_output_ports( if compute_checksum: adjustFileObjs(ret, partial(compute_checksums, fs_access)) - expected_schema = cast( - Schema, self.names.get_name("outputs_record_schema", None) - ) + expected_schema = cast(Schema, self.names.get_name("outputs_record_schema", None)) validate_ex( expected_schema, ret, @@ -1342,10 +1262,7 @@ def collect_output_ports( return ret if ret is not None else {} except ValidationException as e: raise WorkflowException( - "Error validating output record. " - + str(e) - + "\n in " - + json_dumps(ret, indent=4) + "Error validating output record. 
" + str(e) + "\n in " + json_dumps(ret, indent=4) ) from e finally: if builder.mutation_manager and readers: @@ -1400,9 +1317,7 @@ def collect_output( elif gb == ".": gb = outdir elif gb.startswith("/"): - raise WorkflowException( - "glob patterns must not start with '/'" - ) + raise WorkflowException("glob patterns must not start with '/'") try: prefix = fs_access.glob(outdir) sorted_glob_result = sorted( @@ -1415,38 +1330,26 @@ def collect_output( "location": g, "path": fs_access.join( builder.outdir, - urllib.parse.unquote( - g[len(prefix[0]) + 1 :] - ), + urllib.parse.unquote(g[len(prefix[0]) + 1 :]), ), "basename": decoded_basename, - "nameroot": os.path.splitext(decoded_basename)[ - 0 - ], - "nameext": os.path.splitext(decoded_basename)[ - 1 - ], - "class": "File" - if fs_access.isfile(g) - else "Directory", + "nameroot": os.path.splitext(decoded_basename)[0], + "nameext": os.path.splitext(decoded_basename)[1], + "class": "File" if fs_access.isfile(g) else "Directory", } for g, decoded_basename in zip( sorted_glob_result, map( - lambda x: os.path.basename( - urllib.parse.unquote(x) - ), + lambda x: os.path.basename(urllib.parse.unquote(x)), sorted_glob_result, ), ) ] ) - except (OSError) as e: + except OSError as e: _logger.warning(str(e)) except Exception: - _logger.error( - "Unexpected error from fs_access", exc_info=True - ) + _logger.error("Unexpected error from fs_access", exc_info=True) raise for files in cast(List[Dict[str, Optional[CWLOutputType]]], r): @@ -1458,16 +1361,12 @@ def collect_output( get_listing(fs_access, files, (ll == "deep_listing")) else: if binding.get("loadContents"): - with fs_access.open( - cast(str, rfile["location"]), "rb" - ) as f: - files["contents"] = content_limit_respected_read_bytes( - f - ).decode("utf-8") + with fs_access.open(cast(str, rfile["location"]), "rb") as f: + files["contents"] = content_limit_respected_read_bytes(f).decode( + "utf-8" + ) if compute_checksum: - with fs_access.open( - cast(str, rfile["location"]), "rb" - ) as f: + with fs_access.open(cast(str, rfile["location"]), "rb") as f: checksum = hashlib.sha1() # nosec contents = f.read(1024 * 1024) while contents != b"": @@ -1488,9 +1387,7 @@ def collect_output( if "outputEval" in binding: with SourceLine(binding, "outputEval", WorkflowException, debug): - result = builder.do_eval( - cast(CWLOutputType, binding["outputEval"]), context=r - ) + result = builder.do_eval(cast(CWLOutputType, binding["outputEval"]), context=r) else: result = cast(CWLOutputType, r) @@ -1515,9 +1412,7 @@ def collect_output( for primary in aslist(result): if isinstance(primary, MutableMapping): primary.setdefault("secondaryFiles", []) - pathprefix = primary["path"][ - 0 : primary["path"].rindex(os.sep) + 1 - ] + pathprefix = primary["path"][0 : primary["path"].rindex(os.sep) + 1] for sf in aslist(schema["secondaryFiles"]): if "required" in sf: with SourceLine( @@ -1545,23 +1440,16 @@ def collect_output( sf_required = False if "$(" in sf["pattern"] or "${" in sf["pattern"]: - sfpath = builder.do_eval( - sf["pattern"], context=primary - ) + sfpath = builder.do_eval(sf["pattern"], context=primary) else: - sfpath = substitute( - primary["basename"], sf["pattern"] - ) + sfpath = substitute(primary["basename"], sf["pattern"]) for sfitem in aslist(sfpath): if not sfitem: continue if isinstance(sfitem, str): sfitem = {"path": pathprefix + sfitem} - if ( - not fs_access.exists(sfitem["path"]) - and sf_required - ): + if not fs_access.exists(sfitem["path"]) and sf_required: raise WorkflowException( "Missing 
required secondary file '%s'" % (sfitem["path"]) @@ -1587,9 +1475,9 @@ def collect_output( ) if isinstance(result, list): message += f" 'self' had the value of the index {index} result: {primary!r}." - raise SourceLine( - schema, "format", WorkflowException, debug - ).makeError(message) + raise SourceLine(schema, "format", WorkflowException, debug).makeError( + message + ) primary["format"] = format_eval else: for primary in aslist(result): diff --git a/cwltool/context.py b/cwltool/context.py index eb2265cea..46ac74d68 100644 --- a/cwltool/context.py +++ b/cwltool/context.py @@ -53,9 +53,8 @@ def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None: setattr(self, k, v) -def make_tool_notimpl( - toolpath_object: CommentedMap, loadingContext: "LoadingContext" -) -> "Process": +def make_tool_notimpl(toolpath_object: CommentedMap, loadingContext: "LoadingContext") -> "Process": + """Fake implementation of the make tool function.""" raise NotImplementedError() @@ -155,9 +154,7 @@ def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None: self.rm_tmpdir: bool = True self.pull_image: bool = True self.rm_container: bool = True - self.move_outputs: Union[ - Literal["move"], Literal["leave"], Literal["copy"] - ] = "move" + self.move_outputs: Union[Literal["move"], Literal["leave"], Literal["copy"]] = "move" self.log_dir: str = "" self.set_log_dir = set_log_dir self.log_dir_handler = log_handler @@ -169,9 +166,7 @@ def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None: self.compute_checksum: bool = True self.name: str = "" self.default_container: Optional[str] = "" - self.find_default_container: Optional[ - Callable[[HasReqsHints], Optional[str]] - ] = None + self.find_default_container: Optional[Callable[[HasReqsHints], Optional[str]]] = None self.cachedir: Optional[str] = None self.part_of: str = "" self.basedir: str = "" @@ -187,9 +182,7 @@ def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None: self.job_script_provider: Optional[DependenciesConfiguration] = None self.select_resources: Optional[select_resources_callable] = None self.eval_timeout: float = 60 - self.postScatterEval: Optional[ - Callable[[CWLObjectType], Optional[CWLObjectType]] - ] = None + self.postScatterEval: Optional[Callable[[CWLObjectType], Optional[CWLObjectType]]] = None self.on_error: Union[Literal["stop"], Literal["continue"]] = "stop" self.strict_memory_limit: bool = False self.strict_cpu_limit: bool = False diff --git a/cwltool/cuda.py b/cwltool/cuda.py index 9f7355e63..797c7fdfe 100644 --- a/cwltool/cuda.py +++ b/cwltool/cuda.py @@ -30,14 +30,10 @@ def cuda_check(cuda_req: CWLObjectType, requestCount: int) -> int: return 0 versionf = float(version) if versionf < vmin: - _logger.warning( - "CUDA version '%s' is less than minimum version '%s'", version, vmin - ) + _logger.warning("CUDA version '%s' is less than minimum version '%s'", version, vmin) return 0 if requestCount > devices: - _logger.warning( - "Requested %d GPU devices but only %d available", requestCount, devices - ) + _logger.warning("Requested %d GPU devices but only %d available", requestCount, devices) return 0 return requestCount except Exception as e: diff --git a/cwltool/cwlrdf.py b/cwltool/cwlrdf.py index a65cbf3e5..0e1014b3d 100644 --- a/cwltool/cwlrdf.py +++ b/cwltool/cwlrdf.py @@ -52,8 +52,7 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None: for step, run, _ in qres: stdout.write( - '"%s" [label="%s"]\n' - % (lastpart(step), f"{lastpart(step)} ({lastpart(run)})") + '"%s" 
[label="%s"]\n' % (lastpart(step), f"{lastpart(step)} ({lastpart(run)})") ) qres = cast( @@ -70,12 +69,8 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None: for step, inp, source in qres: stdout.write('"%s" [shape=box]\n' % (lastpart(inp))) - stdout.write( - '"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(inp), "") - ) - stdout.write( - '"{}" -> "{}" [label="{}"]\n'.format(lastpart(inp), lastpart(step), "") - ) + stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(inp), "")) + stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(inp), lastpart(step), "")) qres = cast( Iterator[ResultRow], @@ -90,9 +85,7 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None: for step, out in qres: stdout.write('"%s" [shape=box]\n' % (lastpart(out))) - stdout.write( - '"{}" -> "{}" [label="{}"]\n'.format(lastpart(step), lastpart(out), "") - ) + stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(step), lastpart(out), "")) qres = cast( Iterator[ResultRow], @@ -107,9 +100,7 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None: for out, source in qres: stdout.write('"%s" [shape=octagon]\n' % (lastpart(out))) - stdout.write( - '"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(out), "") - ) + stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(out), "")) qres = cast( Iterator[ResultRow], @@ -172,9 +163,7 @@ def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> Non if wf in subworkflows: if wf not in dotname: dotname[wf] = "cluster_" + lastpart(wf) - stdout.write( - f'subgraph "{dotname[wf]}" {{ label="{lastpart(wf)}"\n' # noqa: B907 - ) + stdout.write(f'subgraph "{dotname[wf]}" {{ label="{lastpart(wf)}"\n') # noqa: B907 currentwf = wf clusternode[wf] = step else: @@ -182,8 +171,7 @@ def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> Non if str(runtype) != "https://w3id.org/cwl/cwl#Workflow": stdout.write( - '"%s" [label="%s"]\n' - % (dotname[step], urllib.parse.urldefrag(str(step))[1]) + '"%s" [label="%s"]\n' % (dotname[step], urllib.parse.urldefrag(str(step))[1]) ) if currentwf is not None: diff --git a/cwltool/cwlviewer.py b/cwltool/cwlviewer.py index d259f278c..47a404a25 100644 --- a/cwltool/cwlviewer.py +++ b/cwltool/cwlviewer.py @@ -52,9 +52,7 @@ def _set_inner_edges(self) -> None: else "lightgoldenrodyellow" ) source_style = ( - "dashed" - if inner_edge_row["source_step_class"].endswith("Operation") - else "filled" + "dashed" if inner_edge_row["source_step_class"].endswith("Operation") else "filled" ) n = pydot.Node( "", @@ -77,9 +75,7 @@ def _set_inner_edges(self) -> None: else "lightgoldenrodyellow" ) target_style = ( - "dashed" - if inner_edge_row["target_step_class"].endswith("Operation") - else "filled" + "dashed" if inner_edge_row["target_step_class"].endswith("Operation") else "filled" ) n = pydot.Node( "", @@ -123,9 +119,7 @@ def _set_input_edges(self) -> None: ) n.set_name(str(input_row["input"])) inputs_subgraph.add_node(n) - self._dot_graph.add_edge( - pydot.Edge(str(input_row["input"]), str(input_row["step"])) - ) + self._dot_graph.add_edge(pydot.Edge(str(input_row["input"]), str(input_row["step"]))) def _set_output_edges(self) -> None: with open(_get_output_edges_query_path) as f: @@ -153,9 +147,7 @@ def _set_output_edges(self) -> None: ) n.set_name(str(output_edge_row["output"])) outputs_graph.add_node(n) - self._dot_graph.add_edge( - 
pydot.Edge(output_edge_row["step"], output_edge_row["output"]) - ) + self._dot_graph.add_edge(pydot.Edge(output_edge_row["step"], output_edge_row["output"])) def _get_root_graph_uri(self) -> rdflib.term.Identifier: with open(_get_root_query_path) as f: diff --git a/cwltool/docker.py b/cwltool/docker.py index ac000d077..da7fa84b5 100644 --- a/cwltool/docker.py +++ b/cwltool/docker.py @@ -106,10 +106,7 @@ def get_image( """ found = False - if ( - "dockerImageId" not in docker_requirement - and "dockerPull" in docker_requirement - ): + if "dockerImageId" not in docker_requirement and "dockerPull" in docker_requirement: docker_requirement["dockerImageId"] = docker_requirement["dockerPull"] with _IMAGES_LOCK: @@ -117,9 +114,7 @@ def get_image( return True for line in ( - subprocess.check_output( # nosec - [self.docker_exec, "images", "--no-trunc", "--all"] - ) + subprocess.check_output([self.docker_exec, "images", "--no-trunc", "--all"]) # nosec .decode("utf-8") .splitlines() ): @@ -178,17 +173,13 @@ def get_image( docker_requirement["dockerLoad"], ) with open(docker_requirement["dockerLoad"], "rb") as dload: - loadproc = subprocess.Popen( # nosec - cmd, stdin=dload, stdout=sys.stderr - ) + loadproc = subprocess.Popen(cmd, stdin=dload, stdout=sys.stderr) # nosec else: loadproc = subprocess.Popen( # nosec cmd, stdin=subprocess.PIPE, stdout=sys.stderr ) assert loadproc.stdin is not None # nosec - _logger.info( - "Sending GET request to %s", docker_requirement["dockerLoad"] - ) + _logger.info("Sending GET request to %s", docker_requirement["dockerLoad"]) req = requests.get(docker_requirement["dockerLoad"], stream=True) size = 0 for chunk in req.iter_content(1024 * 1024): @@ -229,16 +220,12 @@ def get_from_requirements( if not shutil.which(self.docker_exec): raise WorkflowException(f"{self.docker_exec} executable is not available") - if self.get_image( - cast(Dict[str, str], r), pull_image, force_pull, tmp_outdir_prefix - ): + if self.get_image(cast(Dict[str, str], r), pull_image, force_pull, tmp_outdir_prefix): return cast(Optional[str], r["dockerImageId"]) raise WorkflowException("Docker image %s not found" % r["dockerImageId"]) @staticmethod - def append_volume( - runtime: List[str], source: str, target: str, writable: bool = False - ) -> None: + def append_volume(runtime: List[str], source: str, target: str, writable: bool = False) -> None: """Add binding arguments to the runtime list.""" options = [ "type=bind", @@ -307,9 +294,7 @@ def add_writable_directory_volume( os.makedirs(host_outdir_tgt) else: if self.inplace_update: - self.append_volume( - runtime, volume.resolved, volume.target, writable=True - ) + self.append_volume(runtime, volume.resolved, volume.target, writable=True) else: if not host_outdir_tgt: tmpdir = create_tmp_dir(tmpdir_prefix) @@ -374,7 +359,6 @@ def create_runtime( runtime.append("--workdir=%s" % (self.builder.outdir)) if not user_space_docker_cmd: - if not runtimeContext.no_read_only: runtime.append("--read-only=true") @@ -390,9 +374,7 @@ def create_runtime( euid, egid = docker_vm_id() euid, egid = euid or os.geteuid(), egid or os.getgid() - if runtimeContext.no_match_user is False and ( - euid is not None and egid is not None - ): + if runtimeContext.no_match_user is False and (euid is not None and egid is not None): runtime.append("--user=%d:%d" % (euid, egid)) if runtimeContext.rm_container: diff --git a/cwltool/executors.py b/cwltool/executors.py index 027da9b2a..1ca679656 100644 --- a/cwltool/executors.py +++ b/cwltool/executors.py @@ -56,12 +56,9 @@ def __call__( 
runtime_context: RuntimeContext, logger: logging.Logger = _logger, ) -> Tuple[Optional[CWLObjectType], str]: - return self.execute(process, job_order_object, runtime_context, logger) - def output_callback( - self, out: Optional[CWLObjectType], process_status: str - ) -> None: + def output_callback(self, out: Optional[CWLObjectType], process_status: str) -> None: """Collect the final status and outputs.""" self.final_status.append(process_status) self.final_output.append(out) @@ -89,9 +86,9 @@ def execute( def check_for_abstract_op(tool: CWLObjectType) -> None: if tool["class"] == "Operation": - raise SourceLine( - tool, "class", WorkflowException, runtime_context.debug - ).makeError("Workflow has unrunnable abstract Operation") + raise SourceLine(tool, "class", WorkflowException, runtime_context.debug).makeError( + "Workflow has unrunnable abstract Operation" + ) process.visit(check_for_abstract_op) @@ -119,10 +116,8 @@ def check_for_abstract_op(tool: CWLObjectType) -> None: List[CWLObjectType], job_order_object["https://w3id.org/cwl/cwl#requirements"], ) - elif ( - "cwl:defaults" in process.metadata - and "https://w3id.org/cwl/cwl#requirements" - in cast(CWLObjectType, process.metadata["cwl:defaults"]) + elif "cwl:defaults" in process.metadata and "https://w3id.org/cwl/cwl#requirements" in cast( + CWLObjectType, process.metadata["cwl:defaults"] ): if process.metadata.get(ORIGINAL_CWLVERSION) == "v1.0": raise WorkflowException( @@ -142,11 +137,7 @@ def check_for_abstract_op(tool: CWLObjectType) -> None: self.run_jobs(process, job_order_object, logger, runtime_context) - if ( - self.final_output - and self.final_output[0] is not None - and finaloutdir is not None - ): + if self.final_output and self.final_output[0] is not None and finaloutdir is not None: self.final_output[0] = relocateOutputs( self.final_output[0], finaloutdir, @@ -168,19 +159,14 @@ def check_for_abstract_op(tool: CWLObjectType) -> None: cleanIntermediate(output_dirs) if self.final_output and self.final_status: - if ( runtime_context.research_obj is not None - and isinstance( - process, (JobBase, Process, WorkflowJobStep, WorkflowJob) - ) + and isinstance(process, (JobBase, Process, WorkflowJobStep, WorkflowJob)) and process.parent_wf ): process_run_id: Optional[str] = None name = "primary" - process.parent_wf.generate_output_prov( - self.final_output[0], process_run_id, name - ) + process.parent_wf.generate_output_prov(self.final_output[0], process_run_id, name) process.parent_wf.document.wasEndedBy( process.parent_wf.workflow_run_uri, None, @@ -202,14 +188,10 @@ def run_jobs( logger: logging.Logger, runtime_context: RuntimeContext, ) -> None: - process_run_id: Optional[str] = None # define provenance profile for single commandline tool - if ( - not isinstance(process, Workflow) - and runtime_context.research_obj is not None - ): + if not isinstance(process, Workflow) and runtime_context.research_obj is not None: process.provenance_object = ProvenanceProfile( runtime_context.research_obj, full_name=runtime_context.cwl_full_name, @@ -293,8 +275,7 @@ def select_resources( rsc_min = request[rsc + "Min"] if rsc_min > maxrsc[rsc]: raise WorkflowException( - f"Requested at least {rsc_min} {rsc} but only " - f"{maxrsc[rsc]} available" + f"Requested at least {rsc_min} {rsc} but only " f"{maxrsc[rsc]} available" ) rsc_max = request[rsc + "Max"] if rsc_max < maxrsc[rsc]: @@ -418,18 +399,12 @@ def run_jobs( logger: logging.Logger, runtime_context: RuntimeContext, ) -> None: - self.taskqueue: TaskQueue = TaskQueue(threading.Lock(), 
psutil.cpu_count()) try: - - jobiter = process.job( - job_order_object, self.output_callback, runtime_context - ) + jobiter = process.job(job_order_object, self.output_callback, runtime_context) if runtime_context.workflow_eval_lock is None: - raise WorkflowException( - "runtimeContext.workflow_eval_lock must not be None" - ) + raise WorkflowException("runtimeContext.workflow_eval_lock must not be None") runtime_context.workflow_eval_lock.acquire() for job in jobiter: diff --git a/cwltool/job.py b/cwltool/job.py index 04dcf0013..ac0640fff 100644 --- a/cwltool/job.py +++ b/cwltool/job.py @@ -91,9 +91,7 @@ def relink_initialworkdir( # directory, so therefore ineligable for being an output file. # Thus, none of our business continue - host_outdir_tgt = os.path.join( - host_outdir, vol.target[len(container_outdir) + 1 :] - ) + host_outdir_tgt = os.path.join(host_outdir, vol.target[len(container_outdir) + 1 :]) if os.path.islink(host_outdir_tgt) or os.path.isfile(host_outdir_tgt): try: os.remove(host_outdir_tgt) @@ -176,14 +174,9 @@ def run( pass def _setup(self, runtimeContext: RuntimeContext) -> None: - - cuda_req, _ = self.builder.get_requirement( - "http://commonwl.org/cwltool#CUDARequirement" - ) + cuda_req, _ = self.builder.get_requirement("http://commonwl.org/cwltool#CUDARequirement") if cuda_req: - count = cuda_check( - cuda_req, math.ceil(self.builder.resources["cudaDeviceCount"]) - ) + count = cuda_check(cuda_req, math.ceil(self.builder.resources["cudaDeviceCount"])) if count == 0: raise WorkflowException("Could not satisfy CUDARequirement") @@ -201,9 +194,7 @@ def is_streamable(file: str) -> bool: for knownfile in self.pathmapper.files(): p = self.pathmapper.mapper(knownfile) if p.type == "File" and not os.path.isfile(p[0]) and p.staged: - if not ( - is_streamable(knownfile) and stat.S_ISFIFO(os.stat(p[0]).st_mode) - ): + if not (is_streamable(knownfile) and stat.S_ISFIFO(os.stat(p[0]).st_mode)): raise WorkflowException( "Input file %s (at %s) not found or is not a regular " "file." % (knownfile, self.pathmapper.mapper(knownfile)[0]) @@ -223,10 +214,7 @@ def is_streamable(file: str) -> bool: "[job %s] initial work dir %s", self.name, json_dumps( - { - p: self.generatemapper.mapper(p) - for p in self.generatemapper.files() - }, + {p: self.generatemapper.mapper(p) for p in self.generatemapper.files()}, indent=4, ), ) @@ -284,12 +272,8 @@ def _execute( ] ), " < %s" % self.stdin if self.stdin else "", - " > %s" % os.path.join(self.base_path_logs, self.stdout) - if self.stdout - else "", - " 2> %s" % os.path.join(self.base_path_logs, self.stderr) - if self.stderr - else "", + " > %s" % os.path.join(self.base_path_logs, self.stdout) if self.stdout else "", + " 2> %s" % os.path.join(self.base_path_logs, self.stderr) if self.stderr else "", ) if self.joborder is not None and runtimeContext.research_obj is not None: job_order = self.joborder @@ -392,8 +376,7 @@ def stderr_stdout_log_path( ) else: raise ValueError( - "'listing' in self.generatefiles but no " - "generatemapper was setup." + "'listing' in self.generatefiles but no " "generatemapper was setup." 
) runtimeContext.log_dir_handler( self.outdir, self.base_path_logs, stdout_path, stderr_path @@ -433,9 +416,7 @@ def stderr_stdout_log_path( _logger.info("[job %s] completed %s", self.name, processStatus) if _logger.isEnabledFor(logging.DEBUG): - _logger.debug( - "[job %s] outputs %s", self.name, json_dumps(outputs, indent=4) - ) + _logger.debug("[job %s] outputs %s", self.name, json_dumps(outputs, indent=4)) if self.generatemapper is not None and runtimeContext.secret_store is not None: # Delete any runtime-generated files containing secrets. @@ -452,9 +433,7 @@ def stderr_stdout_log_path( os.remove(host_outdir_tgt) if runtimeContext.workflow_eval_lock is None: - raise WorkflowException( - "runtimeContext.workflow_eval_lock must not be None" - ) + raise WorkflowException("runtimeContext.workflow_eval_lock must not be None") if self.output_callback: with runtimeContext.workflow_eval_lock: @@ -469,9 +448,7 @@ def stderr_stdout_log_path( shutil.rmtree(self.stagedir, True) if runtimeContext.rm_tmpdir: - _logger.debug( - "[job %s] Removing temporary directory %s", self.name, self.tmpdir - ) + _logger.debug("[job %s] Removing temporary directory %s", self.name, self.tmpdir) shutil.rmtree(self.tmpdir, True) @abstractmethod @@ -507,9 +484,7 @@ def prepare_environment( self._preserve_environment_on_containers_warning() env.update(os.environ) elif runtimeContext.preserve_environment: - self._preserve_environment_on_containers_warning( - runtimeContext.preserve_environment - ) + self._preserve_environment_on_containers_warning(runtimeContext.preserve_environment) for key in runtimeContext.preserve_environment: try: env[key] = os.environ[key] @@ -539,9 +514,7 @@ def get_tree_mem_usage(memory_usage: MutableSequence[Optional[int]]) -> None: rss = monitor.memory_info().rss while len(children): rss += sum(process.memory_info().rss for process in children) - children = list( - itertools.chain(*(process.children() for process in children)) - ) + children = list(itertools.chain(*(process.children() for process in children))) if memory_usage[0] is None or rss > memory_usage[0]: memory_usage[0] = rss except psutil.NoSuchProcess: @@ -559,9 +532,7 @@ def get_tree_mem_usage(memory_usage: MutableSequence[Optional[int]]) -> None: round(memory_usage[0] / (2**20)), ) else: - _logger.debug( - "Could not collect memory usage, job ended before monitoring began." 
- ) + _logger.debug("Could not collect memory usage, job ended before monitoring began.") class CommandLineJob(JobBase): @@ -570,7 +541,6 @@ def run( runtimeContext: RuntimeContext, tmpdir_lock: Optional[threading.Lock] = None, ) -> None: - if tmpdir_lock: with tmpdir_lock: if not os.path.exists(self.tmpdir): @@ -643,9 +613,7 @@ def create_runtime( @staticmethod @abstractmethod - def append_volume( - runtime: List[str], source: str, target: str, writable: bool = False - ) -> None: + def append_volume(runtime: List[str], source: str, target: str, writable: bool = False) -> None: """Add binding arguments to the runtime list.""" @abstractmethod @@ -733,9 +701,7 @@ def add_volumes( for key, vol in (itm for itm in pathmapper.items() if itm[1].staged): host_outdir_tgt: Optional[str] = None if vol.target.startswith(container_outdir + "/"): - host_outdir_tgt = os.path.join( - self.outdir, vol.target[len(container_outdir) + 1 :] - ) + host_outdir_tgt = os.path.join(self.outdir, vol.target[len(container_outdir) + 1 :]) if not host_outdir_tgt and not any_path_okay: raise WorkflowException( "No mandatory DockerRequirement, yet path is outside " @@ -745,13 +711,9 @@ def add_volumes( if vol.type in ("File", "Directory"): self.add_file_or_directory_volume(runtime, vol, host_outdir_tgt) elif vol.type == "WritableFile": - self.add_writable_file_volume( - runtime, vol, host_outdir_tgt, tmpdir_prefix - ) + self.add_writable_file_volume(runtime, vol, host_outdir_tgt, tmpdir_prefix) elif vol.type == "WritableDirectory": - self.add_writable_directory_volume( - runtime, vol, host_outdir_tgt, tmpdir_prefix - ) + self.add_writable_directory_volume(runtime, vol, host_outdir_tgt, tmpdir_prefix) elif vol.type in ["CreateFile", "CreateWritableFile"]: new_path = self.create_file_and_add_volume( runtime, vol, host_outdir_tgt, secret_store, tmpdir_prefix @@ -788,9 +750,7 @@ def run( try: subprocess.check_call(cmd, stdout=sys.stderr) # nosec except OSError as exc: - raise SourceLine( - docker_req, None, WorkflowException, debug - ).makeError( + raise SourceLine(docker_req, None, WorkflowException, debug).makeError( f"Either Docker container {img_id} is not available with " f"user space docker implementation {user_space_docker_cmd} " f" or {user_space_docker_cmd} is missing or broken." 
@@ -818,11 +778,7 @@ def run( if default_container: img_id = str(default_container) - if ( - docker_req is not None - and img_id is None - and runtimeContext.use_container - ): + if docker_req is not None and img_id is None and runtimeContext.use_container: raise Exception("Docker image not available") if ( @@ -851,9 +807,7 @@ def run( _logger.debug("%s error", container, exc_info=True) if docker_is_req: raise UnsupportedRequirement( - "{} is required to run this tool: {}".format( - container, str(err) - ) + "{} is required to run this tool: {}".format(container, str(err)) ) from err else: raise WorkflowException( @@ -905,14 +859,12 @@ def docker_monitor( try: os.remove(cidfile) except OSError as exc: - _logger.warning( - "Ignored error cleaning up %s cidfile: %s", docker_exe, exc - ) + _logger.warning("Ignored error cleaning up %s cidfile: %s", docker_exe, exc) return try: with open(cidfile) as cidhandle: cid = cidhandle.readline().strip() - except (OSError): + except OSError: cid = None max_mem = psutil.virtual_memory().total tmp_dir, tmp_prefix = os.path.split(tmpdir_prefix) @@ -942,15 +894,11 @@ def docker_monitor( if not line: break try: - mem_percent = float( - re.sub(CONTROL_CODE_RE, "", line).replace("%", "") - ) + mem_percent = float(re.sub(CONTROL_CODE_RE, "", line).replace("%", "")) if mem_percent > max_mem_percent: max_mem_percent = mem_percent except ValueError as exc: - _logger.debug( - "%s stats parsing error in line %s: %s", docker_exe, line, exc - ) + _logger.debug("%s stats parsing error in line %s: %s", docker_exe, line, exc) _logger.info( "[job %s] Max memory used: %iMiB", self.name, @@ -975,9 +923,7 @@ def _job_popen( default_stdout: Optional[Union[IO[bytes], TextIO]] = None, default_stderr: Optional[Union[IO[bytes], TextIO]] = None, ) -> int: - if job_script_contents is None and not FORCE_SHELLED_POPEN: - stdin: Union[IO[bytes], int] = subprocess.PIPE if stdin_path is not None: stdin = open(stdin_path, "rb") @@ -1059,9 +1005,7 @@ def terminate(): # type: () -> None job_dir = make_job_dir() try: - with open( - os.path.join(job_dir, "job.json"), mode="w", encoding="utf-8" - ) as job_file: + with open(os.path.join(job_dir, "job.json"), mode="w", encoding="utf-8") as job_file: json_dump(job_description, job_file, ensure_ascii=False) job_script = os.path.join(job_dir, "run_job.bash") with open(job_script, "w") as _: diff --git a/cwltool/load_tool.py b/cwltool/load_tool.py index 2db2ca1a2..50986cc56 100644 --- a/cwltool/load_tool.py +++ b/cwltool/load_tool.py @@ -86,7 +86,6 @@ def resolve_tool_uri( fetcher_constructor: Optional[FetcherCallableType] = None, document_loader: Optional[Loader] = None, ) -> Tuple[str, str]: - uri = None # type: Optional[str] split = urllib.parse.urlsplit(argsworkflow) # In case of Windows path, urlsplit misjudge Drive letters as scheme, here we are skipping that @@ -95,9 +94,7 @@ def resolve_tool_uri( elif os.path.exists(os.path.abspath(argsworkflow)): uri = file_uri(str(os.path.abspath(argsworkflow))) elif resolver is not None: - uri = resolver( - document_loader or default_loader(fetcher_constructor), argsworkflow - ) + uri = resolver(document_loader or default_loader(fetcher_constructor), argsworkflow) if uri is None: raise ValidationException("Not found: '%s'" % argsworkflow) @@ -138,23 +135,15 @@ def fetch_document( ) return loadingContext, workflowobj, uri if isinstance(argsworkflow, MutableMapping): - uri = ( - cast(str, argsworkflow["id"]) - if argsworkflow.get("id") - else "_:" + str(uuid.uuid4()) - ) - workflowobj = cast( - 
CommentedMap, cmap(cast(Dict[str, Any], argsworkflow), fn=uri) - ) + uri = cast(str, argsworkflow["id"]) if argsworkflow.get("id") else "_:" + str(uuid.uuid4()) + workflowobj = cast(CommentedMap, cmap(cast(Dict[str, Any], argsworkflow), fn=uri)) loadingContext.loader.idx[uri] = workflowobj return loadingContext, workflowobj, uri raise ValidationException("Must be URI or object: '%s'" % argsworkflow) def _convert_stdstreams_to_files( - workflowobj: Union[ - CWLObjectType, MutableSequence[Union[CWLObjectType, str, int]], str - ] + workflowobj: Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str, int]], str] ) -> None: if isinstance(workflowobj, MutableMapping): if workflowobj.get("class") == "CommandLineTool": @@ -167,13 +156,9 @@ def _convert_stdstreams_to_files( outputs = workflowobj.get("outputs", []) if not isinstance(outputs, CommentedSeq): raise ValidationException('"outputs" section is not ' "valid.") - for out in cast( - MutableSequence[CWLObjectType], workflowobj.get("outputs", []) - ): + for out in cast(MutableSequence[CWLObjectType], workflowobj.get("outputs", [])): if not isinstance(out, CommentedMap): - raise ValidationException( - f"Output {out!r} is not a valid OutputParameter." - ) + raise ValidationException(f"Output {out!r} is not a valid OutputParameter.") for streamtype in ["stdout", "stderr"]: if out.get("type") == streamtype: if "outputBinding" in out: @@ -186,27 +171,21 @@ def _convert_stdstreams_to_files( else: filename = str( hashlib.sha1( # nosec - json_dumps(workflowobj, sort_keys=True).encode( - "utf-8" - ) + json_dumps(workflowobj, sort_keys=True).encode("utf-8") ).hexdigest() ) workflowobj[streamtype] = filename out["type"] = "File" out["outputBinding"] = cmap({"glob": filename}) - for inp in cast( - MutableSequence[CWLObjectType], workflowobj.get("inputs", []) - ): + for inp in cast(MutableSequence[CWLObjectType], workflowobj.get("inputs", [])): if inp.get("type") == "stdin": if "inputBinding" in inp: raise ValidationException( - "Not allowed to specify inputBinding when" - " using stdin shortcut." + "Not allowed to specify inputBinding when" " using stdin shortcut." ) if "stdin" in workflowobj: raise ValidationException( - "Not allowed to specify stdin path when" - " using stdin type shortcut." + "Not allowed to specify stdin path when" " using stdin type shortcut." 
) else: workflowobj["stdin"] = ( @@ -284,7 +263,6 @@ def _fast_parser_convert_stdstreams_to_files( def _fast_parser_expand_hint_class( hints: Optional[Any], loadingOptions: cwl_v1_2.LoadingOptions ) -> None: - if isinstance(hints, MutableSequence): for h in hints: if isinstance(h, MutableMapping) and "class" in h: @@ -321,9 +299,7 @@ def fast_parser( loadingContext: LoadingContext, fetcher: Fetcher, ) -> Tuple[Union[CommentedMap, CommentedSeq], CommentedMap]: - lopt = cwl_v1_2.LoadingOptions( - idx=loadingContext.codegen_idx, fileuri=fileuri, fetcher=fetcher - ) + lopt = cwl_v1_2.LoadingOptions(idx=loadingContext.codegen_idx, fileuri=fileuri, fetcher=fetcher) if uri not in loadingContext.codegen_idx: cwl_v1_2.load_document_with_metadata( @@ -405,9 +381,7 @@ def resolve_and_validate_document( if not isinstance(workflowobj, MutableMapping): raise ValueError( - "workflowjobj must be a dict, got '{}': {}".format( - type(workflowobj), workflowobj - ) + "workflowjobj must be a dict, got '{}': {}".format(type(workflowobj), workflowobj) ) jobobj = None @@ -442,12 +416,8 @@ def resolve_and_validate_document( ) if not isinstance(cwlVersion, str): - with SourceLine( - workflowobj, "cwlVersion", ValidationException, loadingContext.debug - ): - raise ValidationException( - f"'cwlVersion' must be a string, got {type(cwlVersion)}" - ) + with SourceLine(workflowobj, "cwlVersion", ValidationException, loadingContext.debug): + raise ValidationException(f"'cwlVersion' must be a string, got {type(cwlVersion)}") # strip out version cwlVersion = re.sub(r"^(?:cwl:|https://w3id.org/cwl/cwl#)", "", cwlVersion) if cwlVersion not in list(ALLUPDATES): @@ -464,17 +434,11 @@ def resolve_and_validate_document( "\n{}".format("\n".join(versions)) ) - if ( - isinstance(jobobj, CommentedMap) - and "http://commonwl.org/cwltool#overrides" in jobobj - ): + if isinstance(jobobj, CommentedMap) and "http://commonwl.org/cwltool#overrides" in jobobj: loadingContext.overrides_list.extend(resolve_overrides(jobobj, uri, uri)) del jobobj["http://commonwl.org/cwltool#overrides"] - if ( - isinstance(jobobj, CommentedMap) - and "https://w3id.org/cwl/cwl#requirements" in jobobj - ): + if isinstance(jobobj, CommentedMap) and "https://w3id.org/cwl/cwl#requirements" in jobobj: if cwlVersion not in ("v1.1.0-dev1", "v1.1"): raise ValidationException( "`cwl:requirements` in the input object is not part of CWL " @@ -552,9 +516,7 @@ def resolve_and_validate_document( metadata = copy.copy(metadata) if not isinstance(metadata, CommentedMap): - raise ValidationException( - "metadata must be a CommentedMap, was %s" % type(metadata) - ) + raise ValidationException("metadata must be a CommentedMap, was %s" % type(metadata)) if isinstance(processobj, CommentedMap): uri = processobj["id"] @@ -602,11 +564,7 @@ def make_tool( resolveduri: Union[float, str, CommentedMap, CommentedSeq, None] metadata: CWLObjectType - if ( - loadingContext.fast_parser - and isinstance(uri, str) - and not loadingContext.skip_resolve_all - ): + if loadingContext.fast_parser and isinstance(uri, str) and not loadingContext.skip_resolve_all: resolveduri, metadata = fast_parser( None, None, uri, loadingContext, loadingContext.loader.fetcher ) @@ -623,9 +581,7 @@ def make_tool( raise GraphTargetMissingException( "Tool file contains graph of multiple objects, must specify " "one of #%s" - % ", #".join( - urllib.parse.urldefrag(i["id"])[1] for i in resolveduri if "id" in i - ) + % ", #".join(urllib.parse.urldefrag(i["id"])[1] for i in resolveduri if "id" in i) ) elif 
isinstance(resolveduri, MutableMapping): processobj = resolveduri @@ -647,7 +603,6 @@ def load_tool( argsworkflow: Union[str, CWLObjectType], loadingContext: Optional[LoadingContext] = None, ) -> Process: - loadingContext, workflowobj, uri = fetch_document(argsworkflow, loadingContext) loadingContext, uri = resolve_and_validate_document( diff --git a/cwltool/loghandler.py b/cwltool/loghandler.py index 6d8c633c4..c1f451991 100644 --- a/cwltool/loghandler.py +++ b/cwltool/loghandler.py @@ -32,7 +32,5 @@ def configure_logging( fmtclass = coloredlogs.ColoredFormatter if enable_color else logging.Formatter formatter = fmtclass("%(levelname)s %(message)s") if timestamps: - formatter = fmtclass( - "[%(asctime)s] %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S" - ) + formatter = fmtclass("[%(asctime)s] %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S") stderr_handler.setFormatter(formatter) diff --git a/cwltool/main.py b/cwltool/main.py index d317cd033..0fb7de6c3 100755 --- a/cwltool/main.py +++ b/cwltool/main.py @@ -183,9 +183,7 @@ def generate_example_input( "float": 0.1, "double": 0.1, "string": "a_string", - "File": ruamel.yaml.comments.CommentedMap( - [("class", "File"), ("path", "a/file/path")] - ), + "File": ruamel.yaml.comments.CommentedMap([("class", "File"), ("path", "a/file/path")]), "Directory": ruamel.yaml.comments.CommentedMap( [("class", "Directory"), ("path", "a/directory/path")] ), @@ -304,16 +302,10 @@ def realize_input_schema( if isinstance(entry["type"], Mapping): entry["type"] = cast( CWLOutputAtomType, - realize_input_schema( - [cast(CWLObjectType, entry["type"])], schema_defs - ), + realize_input_schema([cast(CWLObjectType, entry["type"])], schema_defs), ) if entry["type"] == "array": - items = ( - entry["items"] - if not isinstance(entry["items"], str) - else [entry["items"]] - ) + items = entry["items"] if not isinstance(entry["items"], str) else [entry["items"]] entry["items"] = cast( CWLOutputAtomType, realize_input_schema( @@ -325,9 +317,7 @@ def realize_input_schema( entry["fields"] = cast( CWLOutputAtomType, realize_input_schema( - cast( - MutableSequence[Union[str, CWLObjectType]], entry["fields"] - ), + cast(MutableSequence[Union[str, CWLObjectType]], entry["fields"]), schema_defs, ), ) @@ -354,7 +344,6 @@ def load_job_order( overrides_list: List[CWLObjectType], tool_file_uri: str, ) -> Tuple[Optional[CWLObjectType], str, Loader]: - job_order_object = None job_order_file = None @@ -366,9 +355,7 @@ def load_job_order( elif len(args.job_order) == 1 and args.job_order[0] == "-": yaml = yaml_no_ts() job_order_object = yaml.load(stdin) - job_order_object, _ = loader.resolve_all( - job_order_object, file_uri(os.getcwd()) + "/" - ) + job_order_object, _ = loader.resolve_all(job_order_object, file_uri(os.getcwd()) + "/") else: job_order_file = None @@ -376,9 +363,7 @@ def load_job_order( input_basedir = args.basedir if args.basedir else os.getcwd() elif job_order_file is not None: input_basedir = ( - args.basedir - if args.basedir - else os.path.abspath(os.path.dirname(job_order_file)) + args.basedir if args.basedir else os.path.abspath(os.path.dirname(job_order_file)) ) job_order_object, _ = loader.resolve_ref( job_order_file, @@ -386,22 +371,15 @@ def load_job_order( content_types=CWL_CONTENT_TYPES, ) - if ( - job_order_object is not None - and "http://commonwl.org/cwltool#overrides" in job_order_object - ): + if job_order_object is not None and "http://commonwl.org/cwltool#overrides" in job_order_object: ov_uri = file_uri(job_order_file or input_basedir) - 
overrides_list.extend( - resolve_overrides(job_order_object, ov_uri, tool_file_uri) - ) + overrides_list.extend(resolve_overrides(job_order_object, ov_uri, tool_file_uri)) del job_order_object["http://commonwl.org/cwltool#overrides"] if job_order_object is None: input_basedir = args.basedir if args.basedir else os.getcwd() - if job_order_object is not None and not isinstance( - job_order_object, MutableMapping - ): + if job_order_object is not None and not isinstance(job_order_object, MutableMapping): _logger.error( "CWL input object at %s is not formatted correctly, it should be a " "JSON/YAML dictionary, not %s.\n" @@ -447,9 +425,7 @@ def init_job_order( cmd_line = vars(toolparser.parse_args(args.job_order)) for record_name in records: record = {} - record_items = { - k: v for k, v in cmd_line.items() if k.startswith(record_name) - } + record_items = {k: v for k, v in cmd_line.items() if k.startswith(record_name)} for key, value in record_items.items(): record[key[len(record_name) + 1 :]] = value del cmd_line[key] @@ -461,9 +437,7 @@ def init_job_order( loader.resolve_ref(cmd_line["job_order"])[0], ) except Exception: - _logger.exception( - "Failed to resolv job_order: %s", cmd_line["job_order"] - ) + _logger.exception("Failed to resolv job_order: %s", cmd_line["job_order"]) exit(1) else: job_order_object = {"id": args.workflow} @@ -631,9 +605,7 @@ def loadref(base: str, uri: str) -> Union[CommentedMap, CommentedSeq, str, None] nestdirs=nestdirs, ) if sfs is not None: - deps["secondaryFiles"] = cast( - MutableSequence[CWLOutputAtomType], mergedirs(sfs) - ) + deps["secondaryFiles"] = cast(MutableSequence[CWLOutputAtomType], mergedirs(sfs)) return deps @@ -695,12 +667,9 @@ def __init__(self) -> None: """Use the default formatter with our custom formatstring.""" super().__init__("[%(asctime)sZ] %(message)s") - def formatTime( - self, record: logging.LogRecord, datefmt: Optional[str] = None - ) -> str: - formatted_time = time.strftime( - "%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created)) - ) + def formatTime(self, record: logging.LogRecord, datefmt: Optional[str] = None) -> str: + """Override the default formatTime to include the timezone.""" + formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))) with_msecs = f"{formatted_time},{record.msecs:03f}" return with_msecs @@ -754,9 +723,7 @@ def setup_loadingContext( doc_cache=args.doc_cache, ) loadingContext.research_obj = runtimeContext.research_obj - loadingContext.disable_js_validation = args.disable_js_validation or ( - not args.do_validate - ) + loadingContext.disable_js_validation = args.disable_js_validation or (not args.do_validate) loadingContext.construct_tool_object = getdefault( loadingContext.construct_tool_object, workflow.default_make_tool ) @@ -770,15 +737,11 @@ def setup_loadingContext( def make_template(tool: Process, target: IO[str]) -> None: """Make a template CWL input object for the give Process.""" - def my_represent_none( - self: Any, data: Any - ) -> Any: # pylint: disable=unused-argument + def my_represent_none(self: Any, data: Any) -> Any: # pylint: disable=unused-argument """Force clean representation of 'null'.""" return self.represent_scalar("tag:yaml.org,2002:null", "null") - ruamel.yaml.representer.RoundTripRepresenter.add_representer( - type(None), my_represent_none - ) + ruamel.yaml.representer.RoundTripRepresenter.add_representer(type(None), my_represent_none) yaml = YAML() yaml.default_flow_style = False yaml.indent = 4 @@ -927,8 +890,7 @@ def check_working_directories( ): 
sl = ( "/" - if getattr(runtimeContext, dirprefix).endswith("/") - or dirprefix == "cachedir" + if getattr(runtimeContext, dirprefix).endswith("/") or dirprefix == "cachedir" else "" ) setattr( @@ -956,8 +918,7 @@ def print_targets( if tool.tool[f]: _logger.info("%s %s%s targets:", prefix[:-1], f[0].upper(), f[1:-1]) print( - " " - + "\n ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]), + " " + "\n ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]), file=stdout, ) if "steps" in tool.tool: @@ -974,9 +935,7 @@ def print_targets( process = make_tool(cast(CommentedMap, cmap(run)), loading_context) else: process = run - print_targets( - process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/" - ) + print_targets(process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/") def main( @@ -1089,9 +1048,7 @@ def main( if argsl is None: raise Exception("argsl cannot be None") try: - prov_log_stream, prov_log_handler = setup_provenance( - args, argsl, runtimeContext - ) + prov_log_stream, prov_log_handler = setup_provenance(args, argsl, runtimeContext) except ArgumentException: return 1 @@ -1103,9 +1060,7 @@ def main( fetcher_constructor=loadingContext.fetcher_constructor, ) - try_again_msg = ( - "" if args.debug else ", try again with --debug for more information" - ) + try_again_msg = "" if args.debug else ", try again with --debug for more information" try: job_order_object, input_basedir, jobloader = load_job_order( @@ -1118,17 +1073,13 @@ def main( if args.overrides: loadingContext.overrides_list.extend( - load_overrides( - file_uri(os.path.abspath(args.overrides)), tool_file_uri - ) + load_overrides(file_uri(os.path.abspath(args.overrides)), tool_file_uri) ) loadingContext, workflowobj, uri = fetch_document(uri, loadingContext) if args.print_deps and loadingContext.loader: - printdeps( - workflowobj, loadingContext.loader, stdout, args.relative_deps, uri - ) + printdeps(workflowobj, loadingContext.loader, stdout, args.relative_deps, uri) return 0 loadingContext, uri = resolve_and_validate_document( @@ -1148,9 +1099,7 @@ def main( if args.provenance and runtimeContext.research_obj: # Can't really be combined with args.pack at same time - runtimeContext.research_obj.packed_workflow( - print_pack(loadingContext, uri) - ) + runtimeContext.research_obj.packed_workflow(print_pack(loadingContext, uri)) if args.print_pre: json_dump( @@ -1235,10 +1184,8 @@ def main( ) return 0 - except (ValidationException) as exc: - _logger.error( - "Tool definition failed validation:\n%s", str(exc), exc_info=args.debug - ) + except ValidationException as exc: + _logger.error("Tool definition failed validation:\n%s", str(exc), exc_info=args.debug) return 1 except (RuntimeError, WorkflowException) as exc: _logger.error( @@ -1279,12 +1226,8 @@ def main( runtimeContext.log_dir = args.log_dir - runtimeContext.secret_store = getdefault( - runtimeContext.secret_store, SecretStore() - ) - runtimeContext.make_fs_access = getdefault( - runtimeContext.make_fs_access, StdFsAccess - ) + runtimeContext.secret_store = getdefault(runtimeContext.secret_store, SecretStore()) + runtimeContext.make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess) if not executor: if args.parallel: @@ -1305,13 +1248,9 @@ def main( tfjob_order.update(loadingContext.jobdefaults) if job_order_object: tfjob_order.update(job_order_object) - tfout, tfstatus = real_executor( - tool.embedded_tool, tfjob_order, runtimeContext - ) + tfout, tfstatus = real_executor(tool.embedded_tool, tfjob_order, 
runtimeContext) if not tfout or tfstatus != "success": - raise WorkflowException( - "ProcessGenerator failed to generate workflow" - ) + raise WorkflowException("ProcessGenerator failed to generate workflow") tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext) if not job_order_object: job_order_object = None @@ -1340,12 +1279,8 @@ def main( del args.workflow del args.job_order - conf_file = getattr( - args, "beta_dependency_resolvers_configuration", None - ) # str - use_conda_dependencies = getattr( - args, "beta_conda_dependencies", None - ) # str + conf_file = getattr(args, "beta_dependency_resolvers_configuration", None) # str + use_conda_dependencies = getattr(args, "beta_conda_dependencies", None) # str if conf_file or use_conda_dependencies: runtimeContext.job_script_provider = DependenciesConfiguration(args) @@ -1398,9 +1333,7 @@ def loc_to_path(obj: CWLObjectType) -> None: if args.write_summary: with open(args.write_summary, "w") as output_file: - json_dump( - out, output_file, indent=4, ensure_ascii=False, default=str - ) + json_dump(out, output_file, indent=4, ensure_ascii=False, default=str) else: json_dump(out, stdout, indent=4, ensure_ascii=False, default=str) if hasattr(stdout, "flush"): @@ -1412,10 +1345,8 @@ def loc_to_path(obj: CWLObjectType) -> None: _logger.info("Final process status is %s", status) return 0 - except (ValidationException) as exc: - _logger.error( - "Input object failed validation:\n%s", str(exc), exc_info=args.debug - ) + except ValidationException as exc: + _logger.error("Input object failed validation:\n%s", str(exc), exc_info=args.debug) return 1 except UnsupportedRequirement as exc: _logger.error( @@ -1451,9 +1382,7 @@ def loc_to_path(obj: CWLObjectType) -> None: ): research_obj = runtimeContext.research_obj if loadingContext.loader is not None: - research_obj.generate_snapshot( - prov_deps(workflowobj, loadingContext.loader, uri) - ) + research_obj.generate_snapshot(prov_deps(workflowobj, loadingContext.loader, uri)) else: _logger.warning( "Unable to generate provenance snapshot " @@ -1461,9 +1390,7 @@ def loc_to_path(obj: CWLObjectType) -> None: ) if prov_log_handler is not None: # Stop logging so we won't half-log adding ourself to RO - _logger.debug( - "[provenance] Closing provenance log file %s", prov_log_handler - ) + _logger.debug("[provenance] Closing provenance log file %s", prov_log_handler) _logger.removeHandler(prov_log_handler) # Ensure last log lines are written out prov_log_handler.flush() @@ -1486,9 +1413,7 @@ def find_default_container( ) -> Optional[str]: """Find a container.""" if not default_container and use_biocontainers: - default_container = get_container_from_software_requirements( - use_biocontainers, builder - ) + default_container = get_container_from_software_requirements(use_biocontainers, builder) return default_container diff --git a/cwltool/mutation.py b/cwltool/mutation.py index c7d28ed75..077b92cb7 100644 --- a/cwltool/mutation.py +++ b/cwltool/mutation.py @@ -73,9 +73,7 @@ def register_mutation(self, stepname: str, obj: CWLObjectType) -> None: ) ) - self.generations[loc] = MutationState( - current.generation + 1, current.readers, stepname - ) + self.generations[loc] = MutationState(current.generation + 1, current.readers, stepname) def set_generation(self, obj: CWLObjectType) -> None: loc = cast(str, obj["location"]) diff --git a/cwltool/pack.py b/cwltool/pack.py index cd26d3483..d438f0409 100644 --- a/cwltool/pack.py +++ b/cwltool/pack.py @@ -118,7 +118,6 @@ def pack( rewrite_out: 
Optional[Dict[str, str]] = None, loader: Optional[Loader] = None, ) -> CWLObjectType: - # The workflow document we have in memory right now may have been # updated to the internal CWL version. We need to reload the # document to go back to its original version. @@ -155,16 +154,12 @@ def pack( document_loader.idx[po["id"]] = CommentedMap(po.items()) document_loader.idx[metadata["id"]] = CommentedMap(metadata.items()) - found_versions = { - cast(str, loadingContext.metadata["cwlVersion"]) - } # type: Set[str] + found_versions = {cast(str, loadingContext.metadata["cwlVersion"])} # type: Set[str] def loadref(base: Optional[str], lr_uri: str) -> ResolveType: lr_loadingContext = loadingContext.copy() lr_loadingContext.metadata = {} - lr_loadingContext, lr_workflowobj, lr_uri = fetch_document( - lr_uri, lr_loadingContext - ) + lr_loadingContext, lr_workflowobj, lr_uri = fetch_document(lr_uri, lr_loadingContext) lr_loadingContext, lr_uri = resolve_and_validate_document( lr_loadingContext, lr_workflowobj, lr_uri ) @@ -204,9 +199,7 @@ def loadref(base: Optional[str], lr_uri: str) -> ResolveType: mainpath, _ = urllib.parse.urldefrag(uri) - def rewrite_id( - r: str, mainuri: str, rewrite: Dict[str, str], names: Set[str] - ) -> None: + def rewrite_id(r: str, mainuri: str, rewrite: Dict[str, str], names: Set[str]) -> None: if r == mainuri: rewrite[r] = "#main" elif r.startswith(mainuri) and r[len(mainuri)] in ("#", "/"): @@ -230,9 +223,7 @@ def rewrite_id( for r in sorted_output_ids: rewrite_id(r, uri, rewrite_outputs, output_names) - packed = CommentedMap( - (("$graph", CommentedSeq()), ("cwlVersion", update_to_version)) - ) + packed = CommentedMap((("$graph", CommentedSeq()), ("cwlVersion", update_to_version))) namespaces = metadata.get("$namespaces", None) schemas: Set[str] = set() @@ -301,9 +292,7 @@ def rewrite_id( v + "/", ) - for r in list( - rewrite_inputs.keys() - ): # again, to process the outputSource references + for r in list(rewrite_inputs.keys()): # again, to process the outputSource references v = rewrite_inputs[r] replace_refs(packed, rewrite_inputs, r + "/" if "#" in r else r + "#", v + "/") diff --git a/cwltool/pathmapper.py b/cwltool/pathmapper.py index 0e9c240a7..7bc130883 100644 --- a/cwltool/pathmapper.py +++ b/cwltool/pathmapper.py @@ -15,9 +15,7 @@ from .stdfsaccess import abspath from .utils import CWLObjectType, dedup, downloadHttpFile -MapperEnt = collections.namedtuple( - "MapperEnt", ["resolved", "target", "type", "staged"] -) +MapperEnt = collections.namedtuple("MapperEnt", ["resolved", "target", "type", "staged"]) """ Mapper entries. .. py:attribute:: resolved @@ -188,7 +186,6 @@ def visit( ) def setup(self, referenced_files: List[CWLObjectType], basedir: str) -> None: - # Go through each file and set the target to its own directory along # with any secondary files. 
stagedir = self.stagedir @@ -236,9 +233,8 @@ def reversemap( return (k, v[0]) return None - def update( - self, key: str, resolved: str, target: str, ctype: str, stage: bool - ) -> MapperEnt: + def update(self, key: str, resolved: str, target: str, ctype: str, stage: bool) -> MapperEnt: + """Update an existing entry.""" m = MapperEnt(resolved, target, ctype, stage) self._pathmap[key] = m return m diff --git a/cwltool/process.py b/cwltool/process.py index 1ec8ca985..65c67c1fd 100644 --- a/cwltool/process.py +++ b/cwltool/process.py @@ -96,9 +96,7 @@ def filter(self, record: logging.LogRecord) -> bool: _logger_validation_warnings = logging.getLogger("cwltool.validation_warnings") _logger_validation_warnings.setLevel(_logger.getEffectiveLevel()) -_logger_validation_warnings.addFilter( - LogAsDebugFilter("cwltool.validation_warnings", _logger) -) +_logger_validation_warnings.addFilter(LogAsDebugFilter("cwltool.validation_warnings", _logger)) supportedProcessRequirements = [ "DockerRequirement", @@ -188,7 +186,6 @@ def use_custom_schema(version: str, name: str, text: str) -> None: def get_schema( version: str, ) -> Tuple[Loader, Union[Names, SchemaParseException], CWLObjectType, Loader]: - if version in SCHEMA_CACHE: return SCHEMA_CACHE[version] @@ -210,9 +207,9 @@ def get_schema( __name__, f"schemas/{version}/salad/schema_salad/metaschema/{f}", ) - cache[ - "https://w3id.org/cwl/salad/schema_salad/metaschema/" + f - ] = res.read().decode("UTF-8") + cache["https://w3id.org/cwl/salad/schema_salad/metaschema/" + f] = res.read().decode( + "UTF-8" + ) res.close() except OSError: pass @@ -265,9 +262,7 @@ def stage_files( while tgt in targets: i += 1 tgt = f"{entry.target}_{i}" - targets[tgt] = pathmapper.update( - key, entry.resolved, tgt, entry.type, entry.staged - ) + targets[tgt] = pathmapper.update(key, entry.resolved, tgt, entry.type, entry.staged) else: raise WorkflowException( "File staging conflict, trying to stage both %s and %s to the same target %s" @@ -347,9 +342,7 @@ def _relocate(src: str, dst: str) -> None: return # If the source is not contained in source_directories we're not allowed to delete it - src_can_deleted = any( - os.path.commonprefix([p, src]) == p for p in source_directories - ) + src_can_deleted = any(os.path.commonprefix([p, src]) == p for p in source_directories) _action = "move" if action == "move" and src_can_deleted else "copy" @@ -398,9 +391,7 @@ def _check_adjust(a_file: CWLObjectType) -> CWLObjectType: visit_class(outputObj, ("File", "Directory"), _check_adjust) if compute_checksum: - visit_class( - outputObj, ("File",), functools.partial(compute_checksums, fs_access) - ) + visit_class(outputObj, ("File",), functools.partial(compute_checksums, fs_access)) return outputObj @@ -445,8 +436,7 @@ def fill_in_defaults( job[fieldname] = None else: raise WorkflowException( - "Missing required input parameter '%s'" - % shortname(cast(str, inp["id"])) + "Missing required input parameter '%s'" % shortname(cast(str, inp["id"])) ) @@ -471,9 +461,7 @@ def avroize_type( cast(MutableSequence[CWLOutputType], field_type["items"]), name_prefix ) else: - field_type["type"] = avroize_type( - cast(CWLOutputType, field_type["type"]), name_prefix - ) + field_type["type"] = avroize_type(cast(CWLOutputType, field_type["type"]), name_prefix) elif field_type == "File": return "org.w3id.cwl.cwl.File" elif field_type == "Directory": @@ -481,14 +469,11 @@ def avroize_type( return field_type -def get_overrides( - overrides: MutableSequence[CWLObjectType], toolid: str -) -> CWLObjectType: +def
get_overrides(overrides: MutableSequence[CWLObjectType], toolid: str) -> CWLObjectType: + """Combine overrides for the target tool ID.""" req: CWLObjectType = {} if not isinstance(overrides, MutableSequence): - raise ValidationException( - "Expected overrides to be a list, but was %s" % type(overrides) - ) + raise ValidationException("Expected overrides to be a list, but was %s" % type(overrides)) for ov in overrides: if ov["overrideTarget"] == toolid: req.update(ov) @@ -550,9 +535,9 @@ def eval_resource( @mypyc_attr(allow_interpreted_subclasses=True) class Process(HasReqsHints, metaclass=abc.ABCMeta): - def __init__( - self, toolpath_object: CommentedMap, loadingContext: LoadingContext - ) -> None: + """Abstract CWL Process.""" + + def __init__(self, toolpath_object: CommentedMap, loadingContext: LoadingContext) -> None: """Build a Process object from the provided dictionary.""" super().__init__() self.metadata: CWLObjectType = getdefault(loadingContext.metadata, {}) @@ -580,9 +565,7 @@ def __init__( self.requirements = copy.deepcopy(getdefault(loadingContext.requirements, [])) tool_requirements = self.tool.get("requirements", []) if tool_requirements is None: - raise SourceLine( - self.tool, "requirements", ValidationException, debug - ).makeError( + raise SourceLine(self.tool, "requirements", ValidationException, debug).makeError( "If 'requirements' is present then it must be a list " "or map/dictionary, not empty." ) @@ -592,17 +575,16 @@ def __init__( self.requirements.extend( cast( List[CWLObjectType], - get_overrides( - getdefault(loadingContext.overrides_list, []), self.tool["id"] - ).get("requirements", []), + get_overrides(getdefault(loadingContext.overrides_list, []), self.tool["id"]).get( + "requirements", [] + ), ) ) self.hints = copy.deepcopy(getdefault(loadingContext.hints, [])) tool_hints = self.tool.get("hints", []) if tool_hints is None: raise SourceLine(self.tool, "hints", ValidationException, debug).makeError( - "If 'hints' is present then it must be a list " - "or map/dictionary, not empty." + "If 'hints' is present then it must be a list " "or map/dictionary, not empty." ) self.hints.extend(tool_hints) # Versions of requirements and hints which aren't mutated. @@ -658,9 +640,7 @@ def __init__( del c["id"] if "type" not in c: - raise ValidationException( - "Missing 'type' in parameter '{}'".format(c["name"]) - ) + raise ValidationException("Missing 'type' in parameter '{}'".format(c["name"])) if "default" in c and "null" not in aslist(c["type"]): nullable = ["null"] @@ -671,13 +651,9 @@ def __init__( c["type"] = avroize_type(c["type"], c["name"]) if key == "inputs": - cast( - List[CWLObjectType], self.inputs_record_schema["fields"] - ).append(c) + cast(List[CWLObjectType], self.inputs_record_schema["fields"]).append(c) elif key == "outputs": - cast( - List[CWLObjectType], self.outputs_record_schema["fields"] - ).append(c) + cast(List[CWLObjectType], self.outputs_record_schema["fields"]).append(c) with SourceLine(toolpath_object, "inputs", ValidationException, debug): self.inputs_record_schema = cast( @@ -755,10 +731,7 @@ def __init__( else: var_spool_cwl_detector(self.tool) - def _init_job( - self, joborder: CWLObjectType, runtime_context: RuntimeContext - ) -> Builder: - + def _init_job(self, joborder: CWLObjectType, runtime_context: RuntimeContext) -> Builder: if self.metadata.get("cwlVersion") != INTERNAL_VERSION: raise WorkflowException( "Process object loaded with version '%s', must update to '%s' in order to execute." 
@@ -785,9 +758,7 @@ def _init_job( normalizeFilesDirs(job) schema = self.names.get_name("input_record_schema", None) if schema is None: - raise WorkflowException( - "Missing input record schema: " "{}".format(self.names) - ) + raise WorkflowException("Missing input record schema: " "{}".format(self.names)) validate_ex( schema, job, @@ -812,9 +783,7 @@ def _init_job( def inc(d: List[int]) -> None: d[0] += 1 - visit_class( - v, ("Directory",), lambda x: inc(dircount) # noqa: B023 - ) + visit_class(v, ("Directory",), lambda x: inc(dircount)) # noqa: B023 if dircount[0] == 0: continue filecount = [0] @@ -1013,16 +982,12 @@ def evalResources( if rsc.get(a + "Min"): mn = cast( Union[int, float], - eval_resource( - builder, cast(Union[str, int, float], rsc[a + "Min"]) - ), + eval_resource(builder, cast(Union[str, int, float], rsc[a + "Min"])), ) if rsc.get(a + "Max"): mx = cast( Union[int, float], - eval_resource( - builder, cast(Union[str, int, float], rsc[a + "Max"]) - ), + eval_resource(builder, cast(Union[str, int, float], rsc[a + "Max"])), ) if mn is None: mn = mx @@ -1060,20 +1025,14 @@ def checkRequirements( for i, entry in enumerate( cast(MutableSequence[CWLObjectType], rec["requirements"]) ): - with SourceLine( - rec["requirements"], i, UnsupportedRequirement, debug - ): - if ( - cast(str, entry["class"]) - not in supported_process_requirements - ): + with SourceLine(rec["requirements"], i, UnsupportedRequirement, debug): + if cast(str, entry["class"]) not in supported_process_requirements: raise UnsupportedRequirement( f"Unsupported requirement {entry['class']}." ) - def validate_hints( - self, avsc_names: Names, hints: List[CWLObjectType], strict: bool - ) -> None: + def validate_hints(self, avsc_names: Names, hints: List[CWLObjectType], strict: bool) -> None: + """Process the hints field.""" if self.doc_loader is None: return debug = _logger.isEnabledFor(logging.DEBUG) @@ -1090,9 +1049,7 @@ def validate_hints( avroname = avro_type_name(self.doc_loader.vocab[classname]) if avsc_names.get_name(avroname, None) is not None: plain_hint = { - key: r[key] - for key in r - if key not in self.doc_loader.identifiers + key: r[key] for key in r if key not in self.doc_loader.identifiers } # strip identifiers validate_ex( cast( @@ -1178,9 +1135,9 @@ def mergedirs( if e.get("listing"): # name already in entries # merge it into the existing listing - cast( - List[CWLObjectType], ents[basename].setdefault("listing", []) - ).extend(cast(List[CWLObjectType], e["listing"])) + cast(List[CWLObjectType], ents[basename].setdefault("listing", [])).extend( + cast(List[CWLObjectType], e["listing"]) + ) for e in ents.values(): if e["class"] == "Directory" and "listing" in e: e["listing"] = cast( diff --git a/cwltool/procgenerator.py b/cwltool/procgenerator.py index cf148ca90..18d02112f 100644 --- a/cwltool/procgenerator.py +++ b/cwltool/procgenerator.py @@ -23,9 +23,8 @@ def __init__(self, procgenerator: "ProcessGenerator") -> None: self.jobout = None # type: Optional[CWLObjectType] self.processStatus = None # type: Optional[str] - def receive_output( - self, jobout: Optional[CWLObjectType], processStatus: str - ) -> None: + def receive_output(self, jobout: Optional[CWLObjectType], processStatus: str) -> None: + """Process the results.""" self.jobout = jobout self.processStatus = processStatus @@ -35,7 +34,6 @@ def job( output_callbacks: Optional[OutputCallbackType], runtimeContext: RuntimeContext, ) -> JobsGeneratorType: - try: yield from self.procgenerator.embedded_tool.job( job_order, self.receive_output, 
runtimeContext @@ -95,9 +93,7 @@ def job( output_callbacks: Optional[OutputCallbackType], runtimeContext: RuntimeContext, ) -> JobsGeneratorType: - return ProcessGeneratorJob(self).job( - job_order, output_callbacks, runtimeContext - ) + return ProcessGeneratorJob(self).job(job_order, output_callbacks, runtimeContext) def result( self, diff --git a/cwltool/provenance.py b/cwltool/provenance.py index 4044a7362..4a88fe67d 100644 --- a/cwltool/provenance.py +++ b/cwltool/provenance.py @@ -99,9 +99,7 @@ def __init__(self, research_object: "ResearchObject", rel_path: str) -> None: SHA512: hashlib.sha512(), } # Open file in Research Object folder - path = os.path.abspath( - os.path.join(research_object.folder, local_path(rel_path)) - ) + path = os.path.abspath(os.path.join(research_object.folder, local_path(rel_path))) if not path.startswith(os.path.abspath(research_object.folder)): raise ValueError("Path is outside Research Object: %s" % path) _logger.debug("[provenance] Creating WritableBagFile at %s.", path) @@ -334,9 +332,8 @@ def _initialize_bagit(self) -> None: bag_it_file.write("BagIt-Version: 0.97\n") bag_it_file.write("Tag-File-Character-Encoding: %s\n" % ENCODING) - def open_log_file_for_activity( - self, uuid_uri: str - ) -> Union[TextIOWrapper, WritableBagFile]: + def open_log_file_for_activity(self, uuid_uri: str) -> Union[TextIOWrapper, WritableBagFile]: + """Begin the per-activity log.""" self.self_check() # Ensure valid UUID for safe filenames activity_uuid = uuid.UUID(uuid_uri) @@ -405,14 +402,10 @@ def write_bag_file( bag_file = WritableBagFile(self, path) if encoding is not None: # encoding: match Tag-File-Character-Encoding: UTF-8 - return TextIOWrapper( - cast(BinaryIO, bag_file), encoding=encoding, newline="\n" - ) + return TextIOWrapper(cast(BinaryIO, bag_file), encoding=encoding, newline="\n") return bag_file - def add_tagfile( - self, path: str, timestamp: Optional[datetime.datetime] = None - ) -> None: + def add_tagfile(self, path: str, timestamp: Optional[datetime.datetime] = None) -> None: """Add tag files to our research object.""" self.self_check() checksums = {} @@ -498,10 +491,7 @@ def guess_mediatype( # cwlVersion = "v1.0" conformsTo = conforms_to[extension] - if ( - rel_path.startswith(posix_path(PROVENANCE)) - and extension in prov_conforms_to - ): + if rel_path.startswith(posix_path(PROVENANCE)) and extension in prov_conforms_to: if ".cwlprov" in rel_path: # Our own! 
conformsTo = [ @@ -516,7 +506,6 @@ def guess_mediatype( aggregates = [] # type: List[Aggregate] for path in self.bagged_size.keys(): - temp_path = PurePosixPath(path) folder = temp_path.parent filename = temp_path.name @@ -557,9 +546,7 @@ def guess_mediatype( for path in self.tagfiles: if not ( - path.startswith(METADATA) - or path.startswith(WORKFLOW) - or path.startswith(SNAPSHOT) + path.startswith(METADATA) or path.startswith(WORKFLOW) or path.startswith(SNAPSHOT) ): # probably a bagit file continue @@ -591,9 +578,8 @@ def guess_mediatype( aggregates.extend(self._external_aggregates) return aggregates - def add_uri( - self, uri: str, timestamp: Optional[datetime.datetime] = None - ) -> Aggregate: + def add_uri(self, uri: str, timestamp: Optional[datetime.datetime] = None) -> Aggregate: + """Create and store an aggregate for the given URI.""" self.self_check() aggr = {"uri": uri} # type: Aggregate aggr["createdOn"], aggr["createdBy"] = self._self_made(timestamp=timestamp) @@ -685,7 +671,6 @@ def _authored_by(self) -> Optional[AuthoredBy]: return None def _write_ro_manifest(self) -> None: - # Does not have to be this order, but it's nice to be consistent filename = "manifest.json" createdOn, createdBy = self._self_made() @@ -713,18 +698,13 @@ def _write_ro_manifest(self) -> None: manifest_file.write(json_manifest) def _write_bag_info(self) -> None: - with self.write_bag_file("bag-info.txt") as info_file: info_file.write("Bag-Software-Agent: %s\n" % self.cwltool_version) # FIXME: require sha-512 of payload to comply with profile? # FIXME: Update profile - info_file.write( - "BagIt-Profile-Identifier: https://w3id.org/ro/bagit/profile\n" - ) + info_file.write("BagIt-Profile-Identifier: https://w3id.org/ro/bagit/profile\n") info_file.write("Bagging-Date: %s\n" % datetime.date.today().isoformat()) - info_file.write( - "External-Description: Research Object of CWL workflow run\n" - ) + info_file.write("External-Description: Research Object of CWL workflow run\n") if self.full_name: info_file.write("Contact-Name: %s\n" % self.full_name) @@ -759,9 +739,7 @@ def generate_snapshot(self, prov_dep: CWLObjectType) -> None: shutil.copytree(filepath, path) else: shutil.copy(filepath, path) - timestamp = datetime.datetime.fromtimestamp( - os.path.getmtime(filepath) - ) + timestamp = datetime.datetime.fromtimestamp(os.path.getmtime(filepath)) self.add_tagfile(path, timestamp) except PermissionError: pass # FIXME: avoids duplicate snapshotting; need better solution @@ -796,9 +774,7 @@ def add_data_file( """Copy inputs to data/ folder.""" self.self_check() tmp_dir, tmp_prefix = os.path.split(self.temp_prefix) - with tempfile.NamedTemporaryFile( - prefix=tmp_prefix, dir=tmp_dir, delete=False - ) as tmp: + with tempfile.NamedTemporaryFile(prefix=tmp_prefix, dir=tmp_dir, delete=False) as tmp: checksum = checksum_copy(from_fp, tmp) # Calculate hash-based file path @@ -817,9 +793,7 @@ def add_data_file( if Hasher == hashlib.sha1: self._add_to_bagit(rel_path, sha1=checksum) else: - _logger.warning( - "[provenance] Unknown hash method %s for bagit manifest", Hasher - ) + _logger.warning("[provenance] Unknown hash method %s for bagit manifest", Hasher) # Inefficient, bagit support need to checksum again self._add_to_bagit(rel_path) _logger.debug("[provenance] Added data file %s", path) @@ -859,14 +833,12 @@ def add_to_manifest(self, rel_path: str, checksums: Dict[str, str]) -> None: manifest = "tagmanifest" # Add checksums to corresponding manifest files - for (method, hash_value) in checksums.items(): + for
method, hash_value in checksums.items(): # File not in manifest because we bailed out on # existence in bagged_size above manifestpath = os.path.join(self.folder, f"{manifest}-{method.lower()}.txt") # encoding: match Tag-File-Character-Encoding: UTF-8 - with open( - manifestpath, "a", encoding=ENCODING, newline="\n" - ) as checksum_file: + with open(manifestpath, "a", encoding=ENCODING, newline="\n") as checksum_file: line = f"{hash_value} {rel_path}\n" _logger.debug("[provenance] Added to %s: %s", manifestpath, line) checksum_file.write(line) @@ -892,9 +864,7 @@ def _add_to_bagit(self, rel_path: str, **checksums: str) -> None: self.add_to_manifest(rel_path, checksums) - def create_job( - self, builder_job: CWLObjectType, is_output: bool = False - ) -> CWLObjectType: + def create_job(self, builder_job: CWLObjectType, is_output: bool = False) -> CWLObjectType: # TODO customise the file """Generate the new job object with RO specific relative paths.""" copied = copy.deepcopy(builder_job) @@ -954,9 +924,7 @@ def _relativise_files( # Register in RO; but why was this not picked # up by used_artefacts? _logger.info("[provenance] Adding to RO %s", structure["location"]) - with self.fsaccess.open( - cast(str, structure["location"]), "rb" - ) as fp: + with self.fsaccess.open(cast(str, structure["location"]), "rb") as fp: relative_path = self.add_data_file(fp) checksum = PurePosixPath(relative_path).name structure["checksum"] = f"{SHA1}${checksum}" diff --git a/cwltool/provenance_profile.py b/cwltool/provenance_profile.py index 137a5af4b..3b2b2b586 100644 --- a/cwltool/provenance_profile.py +++ b/cwltool/provenance_profile.py @@ -54,9 +54,7 @@ from .provenance import ResearchObject -def copy_job_order( - job: Union[Process, JobsType], job_order_object: CWLObjectType -) -> CWLObjectType: +def copy_job_order(job: Union[Process, JobsType], job_order_object: CWLObjectType) -> CWLObjectType: """Create copy of job object for provenance.""" if not isinstance(job, WorkflowJob): # direct command line tool execution @@ -172,9 +170,7 @@ def host_provenance(document: ProvDocument) -> None: ) ro_identifier_workflow = self.research_object.base_uri + "workflow/packed.cwl#" self.wf_ns = self.document.add_namespace("wf", ro_identifier_workflow) - ro_identifier_input = ( - self.research_object.base_uri + "workflow/primary-job.json#" - ) + ro_identifier_input = self.research_object.base_uri + "workflow/primary-job.json#" self.document.add_namespace("input", ro_identifier_input) # More info about the account (e.g. 
username, fullname) @@ -225,9 +221,7 @@ def host_provenance(document: ProvDocument) -> None: ) # association between SoftwareAgent and WorkflowRun main_workflow = "wf:main" - self.document.wasAssociatedWith( - self.workflow_run_uri, self.engine_uuid, main_workflow - ) + self.document.wasAssociatedWith(self.workflow_run_uri, self.engine_uuid, main_workflow) self.document.wasStartedBy( self.workflow_run_uri, None, self.engine_uuid, datetime.datetime.now() ) @@ -286,9 +280,7 @@ def start_process( self.document.wasAssociatedWith( process_run_id, self.engine_uuid, str("wf:main/" + process_name) ) - self.document.wasStartedBy( - process_run_id, None, self.workflow_run_uri, when, None, None - ) + self.document.wasStartedBy(process_run_id, None, self.workflow_run_uri, when, None, None) return process_run_id def record_process_end( @@ -320,9 +312,7 @@ def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, st relative_path = self.research_object.add_data_file(fhandle) # FIXME: This naively relies on add_data_file setting hash as filename checksum = PurePath(relative_path).name - entity = self.document.entity( - "data:" + checksum, {PROV_TYPE: WFPROV["Artifact"]} - ) + entity = self.document.entity("data:" + checksum, {PROV_TYPE: WFPROV["Artifact"]}) if "checksum" not in value: value["checksum"] = f"{SHA1}${checksum}" @@ -332,9 +322,7 @@ def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, st # By here one of them should have worked! if not entity or not checksum: - raise ValueError( - "class:File but missing checksum/location/content: %r" % value - ) + raise ValueError("class:File but missing checksum/location/content: %r" % value) # Track filename and extension, this is generally useful only for # secondaryFiles. Note that multiple uses of a file might thus record @@ -348,23 +336,15 @@ def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, st ) if "basename" in value: - file_entity.add_attributes( - {CWLPROV["basename"]: cast(str, value["basename"])} - ) + file_entity.add_attributes({CWLPROV["basename"]: cast(str, value["basename"])}) if "nameroot" in value: - file_entity.add_attributes( - {CWLPROV["nameroot"]: cast(str, value["nameroot"])} - ) + file_entity.add_attributes({CWLPROV["nameroot"]: cast(str, value["nameroot"])}) if "nameext" in value: - file_entity.add_attributes( - {CWLPROV["nameext"]: cast(str, value["nameext"])} - ) + file_entity.add_attributes({CWLPROV["nameext"]: cast(str, value["nameext"])}) self.document.specializationOf(file_entity, entity) # Check for secondaries - for sec in cast( - MutableSequence[CWLObjectType], value.get("secondaryFiles", []) - ): + for sec in cast(MutableSequence[CWLObjectType], value.get("secondaryFiles", [])): # TODO: Record these in a specializationOf entity with UUID? 
if sec["class"] == "File": (sec_entity, _, _) = self.declare_file(sec) @@ -478,9 +458,7 @@ def declare_directory(self, value: CWLObjectType) -> ProvEntity: ore_doc_path = str(PurePosixPath(METADATA, ore_doc_fn)) with self.research_object.write_bag_file(ore_doc_path) as provenance_file: ore_doc.serialize(provenance_file, format="rdf", rdf_format="turtle") - self.research_object.add_annotation( - dir_id, [ore_doc_fn], ORE["isDescribedBy"].uri - ) + self.research_object.add_annotation(dir_id, [ore_doc_fn], ORE["isDescribedBy"].uri) if is_empty: # Empty directory @@ -570,7 +548,7 @@ def declare_artefact(self, value: Any) -> ProvEntity: # Let's iterate and recurse coll_attribs: List[Tuple[Union[str, Identifier], Any]] = [] - for (key, val) in value.items(): + for key, val in value.items(): v_ent = self.declare_artefact(val) self.document.membership(coll, v_ent) m_entity = self.document.entity(uuid.uuid4().urn) @@ -578,9 +556,7 @@ def declare_artefact(self, value: Any) -> ProvEntity: # https://www.w3.org/TR/prov-dictionary/#dictionary-ontological-definition # as prov.py do not easily allow PROV-N extensions m_entity.add_asserted_type(PROV["KeyEntityPair"]) - m_entity.add_attributes( - {PROV["pairKey"]: str(key), PROV["pairEntity"]: v_ent} - ) + m_entity.add_attributes({PROV["pairKey"]: str(key), PROV["pairEntity"]: v_ent}) coll_attribs.append((PROV["hadDictionaryMember"], m_entity)) coll.add_attributes(coll_attribs) self.research_object.add_uri(coll.identifier.uri) @@ -722,9 +698,7 @@ def prospective_prov(self, job: JobsType) -> None: ) # TODO: Declare roles/parameters as well - def activity_has_provenance( - self, activity: str, prov_ids: Sequence[Identifier] - ) -> None: + def activity_has_provenance(self, activity: str, prov_ids: Sequence[Identifier]) -> None: """Add http://www.w3.org/TR/prov-aq/ relations to nested PROV files.""" # NOTE: The below will only work if the corresponding metadata/provenance arcp URI # is a pre-registered namespace in the PROV Document @@ -765,9 +739,7 @@ def finalize_prov_profile(self, name: Optional[str]) -> List[QualifiedName]: prov_ids.append(self.provenance_ns[filename + ".xml"]) # https://www.w3.org/TR/prov-n/ - with self.research_object.write_bag_file( - basename + ".provn" - ) as provenance_file: + with self.research_object.write_bag_file(basename + ".provn") as provenance_file: self.document.serialize(provenance_file, format="provn", indent=2) prov_ids.append(self.provenance_ns[filename + ".provn"]) @@ -786,18 +758,14 @@ def finalize_prov_profile(self, name: Optional[str]) -> List[QualifiedName]: # https://www.w3.org/TR/n-triples/ with self.research_object.write_bag_file(basename + ".nt") as provenance_file: - self.document.serialize( - provenance_file, format="rdf", rdf_format="ntriples" - ) + self.document.serialize(provenance_file, format="rdf", rdf_format="ntriples") prov_ids.append(self.provenance_ns[filename + ".nt"]) # https://www.w3.org/TR/json-ld/ # TODO: Use a nice JSON-LD context # see also https://eprints.soton.ac.uk/395985/ # 404 Not Found on https://provenance.ecs.soton.ac.uk/prov.jsonld :( - with self.research_object.write_bag_file( - basename + ".jsonld" - ) as provenance_file: + with self.research_object.write_bag_file(basename + ".jsonld") as provenance_file: self.document.serialize(provenance_file, format="rdf", rdf_format="json-ld") prov_ids.append(self.provenance_ns[filename + ".jsonld"]) diff --git a/cwltool/resolver.py b/cwltool/resolver.py index 771ba3ea5..e48957f26 100644 --- a/cwltool/resolver.py +++ b/cwltool/resolver.py @@ 
-25,13 +25,9 @@ def resolve_local(document_loader: Optional[Loader], uri: str) -> Optional[str]: return pathobj.as_uri() sharepaths = [ - os.environ.get( - "XDG_DATA_HOME", os.path.join(os.path.expanduser("~"), ".local", "share") - ) + os.environ.get("XDG_DATA_HOME", os.path.join(os.path.expanduser("~"), ".local", "share")) ] - sharepaths.extend( - os.environ.get("XDG_DATA_DIRS", "/usr/local/share/:/usr/share/").split(":") - ) + sharepaths.extend(os.environ.get("XDG_DATA_DIRS", "/usr/local/share/:/usr/share/").split(":")) shares = [os.path.join(s, "commonwl", uri) for s in sharepaths] _logger.debug("Search path is %s", shares) @@ -63,9 +59,7 @@ def tool_resolver(document_loader: Loader, uri: str) -> Optional[str]: # TODO not stripping off "descriptor" when looking for local imports would also # work https://github.com/ga4gh/tool-registry-service-schemas/blob/2.0.0-beta.2/src/main/resources/swagger/ga4gh-tool-discovery.yaml#L273 # noqa: B950 GA4GH_TRS_FILES = "{0}/api/ga4gh/v2/tools/{1}/versions/{2}/CWL/files" -GA4GH_TRS_PRIMARY_DESCRIPTOR = ( - "{0}/api/ga4gh/v2/tools/{1}/versions/{2}/plain-CWL/descriptor/{3}" -) +GA4GH_TRS_PRIMARY_DESCRIPTOR = "{0}/api/ga4gh/v2/tools/{1}/versions/{2}/plain-CWL/descriptor/{3}" def resolve_ga4gh_tool(document_loader: Loader, uri: str) -> Optional[str]: diff --git a/cwltool/run_job.py b/cwltool/run_job.py index ac2f1025d..a8fe32496 100644 --- a/cwltool/run_job.py +++ b/cwltool/run_job.py @@ -12,9 +12,7 @@ def handle_software_environment(cwl_env: Dict[str, str], script: str) -> Dict[st exec_env["_CWLTOOL"] = "1" res = subprocess.run(["bash", script], shell=False, env=exec_env) # nosec if res.returncode != 0: - sys.stderr.write( - "Error while using SoftwareRequirements to modify environment\n" - ) + sys.stderr.write("Error while using SoftwareRequirements to modify environment\n") return cwl_env env = cwl_env.copy() diff --git a/cwltool/singularity.py b/cwltool/singularity.py index 887569396..4f6a46d3a 100644 --- a/cwltool/singularity.py +++ b/cwltool/singularity.py @@ -55,9 +55,7 @@ def get_version() -> Tuple[List[int], str]: _SINGULARITY_VERSION = [int(i) for i in version_string.split(".")] _SINGULARITY_FLAVOR = version_match.group(1) - _logger.debug( - f"Singularity version: {version_string}" " ({_SINGULARITY_FLAVOR}." 
- ) + _logger.debug(f"Singularity version: {version_string}" " ({_SINGULARITY_FLAVOR}.") return (_SINGULARITY_VERSION, _SINGULARITY_FLAVOR) @@ -157,13 +155,8 @@ def get_image( elif is_version_2_6() and "SINGULARITY_PULLFOLDER" in os.environ: cache_folder = os.environ["SINGULARITY_PULLFOLDER"] - if ( - "dockerImageId" not in dockerRequirement - and "dockerPull" in dockerRequirement - ): - match = re.search( - pattern=r"([a-z]*://)", string=dockerRequirement["dockerPull"] - ) + if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement: + match = re.search(pattern=r"([a-z]*://)", string=dockerRequirement["dockerPull"]) img_name = _normalize_image_id(dockerRequirement["dockerPull"]) candidates.append(img_name) if is_version_3_or_newer(): @@ -173,9 +166,7 @@ def get_image( else: dockerRequirement["dockerImageId"] = img_name if not match: - dockerRequirement["dockerPull"] = ( - "docker://" + dockerRequirement["dockerPull"] - ) + dockerRequirement["dockerPull"] = "docker://" + dockerRequirement["dockerPull"] elif "dockerImageId" in dockerRequirement: if os.path.isfile(dockerRequirement["dockerImageId"]): found = True @@ -222,9 +213,7 @@ def get_image( "pull", "--force", "--name", - "{}/{}".format( - cache_folder, dockerRequirement["dockerImageId"] - ), + "{}/{}".format(cache_folder, dockerRequirement["dockerImageId"]), str(dockerRequirement["dockerPull"]), ] @@ -303,16 +292,13 @@ def get_from_requirements( raise WorkflowException("singularity executable is not available") if not self.get_image(cast(Dict[str, str], r), pull_image, force_pull): - raise WorkflowException( - "Container image {} not found".format(r["dockerImageId"]) - ) + raise WorkflowException("Container image {} not found".format(r["dockerImageId"])) return os.path.abspath(cast(str, r["dockerImageId"])) @staticmethod - def append_volume( - runtime: List[str], source: str, target: str, writable: bool = False - ) -> None: + def append_volume(runtime: List[str], source: str, target: str, writable: bool = False) -> None: + """Add binding arguments to the runtime list.""" runtime.append("--bind") # Mounts are writable by default, so 'rw' is optional and not # supported (due to a bug) in some 3.6 series releases. 
@@ -399,19 +385,13 @@ def add_writable_directory_volume( ensure_writable(host_outdir_tgt) else: if self.inplace_update: - self.append_volume( - runtime, volume.resolved, volume.target, writable=True - ) + self.append_volume(runtime, volume.resolved, volume.target, writable=True) else: if not host_outdir_tgt: tmpdir = create_tmp_dir(tmpdir_prefix) - new_dir = os.path.join( - tmpdir, os.path.basename(volume.resolved) - ) + new_dir = os.path.join(tmpdir, os.path.basename(volume.resolved)) shutil.copytree(volume.resolved, new_dir) - self.append_volume( - runtime, new_dir, volume.target, writable=True - ) + self.append_volume(runtime, new_dir, volume.target, writable=True) else: shutil.copytree(volume.resolved, host_outdir_tgt) ensure_writable(host_outdir_tgt or new_dir) diff --git a/cwltool/software_requirements.py b/cwltool/software_requirements.py index 381aab057..2ebfae13d 100644 --- a/cwltool/software_requirements.py +++ b/cwltool/software_requirements.py @@ -86,12 +86,10 @@ def build_job_script(self, builder: "Builder", command: List[str]) -> str: "conda_auto_install": True, "conda_auto_init": True, } - tool_dependency_manager: "deps.DependencyManager" = ( - deps.build_dependency_manager( - app_config_dict=app_config, - resolution_config_dict=resolution_config_dict, - conf_file=self.dependency_resolvers_config_file, - ) + tool_dependency_manager: "deps.DependencyManager" = deps.build_dependency_manager( + app_config_dict=app_config, + resolution_config_dict=resolution_config_dict, + conf_file=self.dependency_resolvers_config_file, ) dependencies = get_dependencies(builder) handle_dependencies = "" # str diff --git a/cwltool/stdfsaccess.py b/cwltool/stdfsaccess.py index 0c8eea15d..069289111 100644 --- a/cwltool/stdfsaccess.py +++ b/cwltool/stdfsaccess.py @@ -32,9 +32,7 @@ def _abs(self, p: str) -> str: return abspath(p, self.basedir) def glob(self, pattern: str) -> List[str]: - return [ - file_uri(str(self._abs(line))) for line in glob.glob(self._abs(pattern)) - ] + return [file_uri(str(self._abs(line))) for line in glob.glob(self._abs(pattern))] def open(self, fn: str, mode: str) -> IO[Any]: return open(self._abs(fn), mode) @@ -52,10 +50,7 @@ def isdir(self, fn: str) -> bool: return os.path.isdir(self._abs(fn)) def listdir(self, fn: str) -> List[str]: - return [ - abspath(urllib.parse.quote(entry), fn) - for entry in os.listdir(self._abs(fn)) - ] + return [abspath(urllib.parse.quote(entry), fn) for entry in os.listdir(self._abs(fn))] def join(self, path, *paths): # type: (str, *str) -> str return os.path.join(path, *paths) diff --git a/cwltool/subgraph.py b/cwltool/subgraph.py index 147a8143d..f6df7e69f 100644 --- a/cwltool/subgraph.py +++ b/cwltool/subgraph.py @@ -36,7 +36,6 @@ def subgraph_visit( visited: Set[str], direction: str, ) -> None: - if current in visited: return visited.add(current) @@ -175,12 +174,8 @@ def get_subgraph( if nodes[v].type == STEP: wfstep = find_step(tool.steps, v, loading_context)[0] if wfstep is not None: - for inp in cast( - MutableSequence[CWLObjectType], wfstep["inputs"] - ): - if "source" in inp and u in cast( - CWLObjectType, inp["source"] - ): + for inp in cast(MutableSequence[CWLObjectType], wfstep["inputs"]): + if "source" in inp and u in cast(CWLObjectType, inp["source"]): rewire[u] = (rn, cast(CWLObjectType, inp["type"])) break else: @@ -198,11 +193,7 @@ def get_subgraph( continue if isinstance(in_port["source"], MutableSequence): in_port["source"] = CommentedSeq( - [ - rewire[s][0] - for s in in_port["source"] - if s in rewire - ] + [rewire[s][0] 
for s in in_port["source"] if s in rewire] ) elif in_port["source"] in rewire: in_port["source"] = rewire[in_port["source"]][0] @@ -216,9 +207,7 @@ def get_subgraph( return extracted -def get_step( - tool: Workflow, step_id: str, loading_context: LoadingContext -) -> CommentedMap: +def get_step(tool: Workflow, step_id: str, loading_context: LoadingContext) -> CommentedMap: """Extract a single WorkflowStep for the given step_id.""" extracted = CommentedMap() diff --git a/cwltool/udocker.py b/cwltool/udocker.py index 9db34d7f4..6ab54ff40 100644 --- a/cwltool/udocker.py +++ b/cwltool/udocker.py @@ -9,10 +9,6 @@ class UDockerCommandLineJob(DockerCommandLineJob): """Runs a CommandLineJob in a software container using the udocker engine.""" @staticmethod - def append_volume( - runtime: List[str], source: str, target: str, writable: bool = False - ) -> None: + def append_volume(runtime: List[str], source: str, target: str, writable: bool = False) -> None: """Add binding arguments to the runtime list.""" - runtime.append( - "--volume={}:{}:{}".format(source, target, "rw" if writable else "ro") - ) + runtime.append("--volume={}:{}:{}".format(source, target, "rw" if writable else "ro")) diff --git a/cwltool/update.py b/cwltool/update.py index 1347f9513..b7f54a700 100644 --- a/cwltool/update.py +++ b/cwltool/update.py @@ -61,9 +61,7 @@ def rewrite_requirements(t: CWLObjectType) -> None: r["class"] = rewrite[cls] else: raise ValidationException( - "requirements entries must be dictionaries: {} {}.".format( - type(r), r - ) + "requirements entries must be dictionaries: {} {}.".format(type(r), r) ) if "hints" in t: for r in cast(MutableSequence[CWLObjectType], t["hints"]): @@ -72,17 +70,13 @@ def rewrite_requirements(t: CWLObjectType) -> None: if cls in rewrite: r["class"] = rewrite[cls] else: - raise ValidationException( - f"hints entries must be dictionaries: {type(r)} {r}." - ) + raise ValidationException(f"hints entries must be dictionaries: {type(r)} {r}.") if "steps" in t: for s in cast(MutableSequence[CWLObjectType], t["steps"]): if isinstance(s, MutableMapping): rewrite_requirements(s) else: - raise ValidationException( - f"steps entries must be dictionaries: {type(s)} {s}." 
- ) + raise ValidationException(f"steps entries must be dictionaries: {type(s)} {s}.") def update_secondaryFiles( t: CWLOutputType, top: bool = False @@ -93,9 +87,7 @@ def update_secondaryFiles( new_seq[index] = update_secondaryFiles(entry) return new_seq elif isinstance(t, MutableSequence): - return CommentedSeq( - [update_secondaryFiles(cast(CWLOutputType, p)) for p in t] - ) + return CommentedSeq([update_secondaryFiles(cast(CWLOutputType, p)) for p in t]) elif isinstance(t, MutableMapping): return cast(MutableMapping[str, str], t) elif top: @@ -136,9 +128,7 @@ def fix_inputBinding(t: CWLObjectType) -> None: proc["hints"].insert(0, na) - ll = CommentedMap( - [("class", "LoadListingRequirement"), ("loadListing", "deep_listing")] - ) + ll = CommentedMap([("class", "LoadListingRequirement"), ("loadListing", "deep_listing")]) ll.lc.filename = comment_filename proc["hints"].insert( 0, @@ -220,17 +210,13 @@ def v1_2_0dev5to1_2( "v1.2", ] -UPDATES: Dict[ - str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]] -] = { +UPDATES: Dict[str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]]] = { "v1.0": v1_0to1_1, "v1.1": v1_1to1_2, "v1.2": None, } -DEVUPDATES: Dict[ - str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]] -] = { +DEVUPDATES: Dict[str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]]] = { "v1.1.0-dev1": v1_1_0dev1to1_1, "v1.2.0-dev1": v1_2_0dev1todev2, "v1.2.0-dev2": v1_2_0dev2todev3, @@ -324,9 +310,7 @@ def update( (cdoc, version) = checkversion(doc, metadata, enable_dev) originalversion = copy.copy(version) - nextupdate: Optional[ - Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]] - ] = identity + nextupdate: Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]] = identity while version != update_to and nextupdate: (cdoc, version) = nextupdate(cdoc, loader, baseuri) diff --git a/cwltool/utils.py b/cwltool/utils.py index 6a544e6d7..6326af4f6 100644 --- a/cwltool/utils.py +++ b/cwltool/utils.py @@ -66,15 +66,11 @@ int, float, MutableSequence[ - Union[ - None, bool, str, int, float, MutableSequence[Any], MutableMapping[str, Any] - ] + Union[None, bool, str, int, float, MutableSequence[Any], MutableMapping[str, Any]] ], MutableMapping[ str, - Union[ - None, bool, str, int, float, MutableSequence[Any], MutableMapping[str, Any] - ], + Union[None, bool, str, int, float, MutableSequence[Any], MutableMapping[str, Any]], ], ] CWLOutputType = Union[ @@ -88,9 +84,7 @@ CWLObjectType = MutableMapping[str, Optional[CWLOutputType]] """Typical raw dictionary found in lightly parsed CWL.""" -JobsType = Union[ - "CommandLineJob", "JobBase", "WorkflowJob", "ExpressionJob", "CallbackJob" -] +JobsType = Union["CommandLineJob", "JobBase", "WorkflowJob", "ExpressionJob", "CallbackJob"] JobsGeneratorType = Generator[Optional[JobsType], None, None] OutputCallbackType = Callable[[Optional[CWLObjectType], str], None] ResolverType = Callable[["Loader", str], Optional[str]] @@ -102,9 +96,7 @@ "DirectoryType", {"class": str, "listing": List[CWLObjectType], "basename": str} ) JSONAtomType = Union[Dict[str, Any], List[Any], str, int, float, bool, None] -JSONType = Union[ - Dict[str, JSONAtomType], List[JSONAtomType], str, int, float, bool, None -] +JSONType = Union[Dict[str, JSONAtomType], List[JSONAtomType], str, int, float, bool, None] WorkflowStateItem = NamedTuple( "WorkflowStateItem", [ @@ -117,9 +109,7 @@ ParametersType = List[CWLObjectType] StepType = CWLObjectType # 
WorkflowStep -LoadListingType = Union[ - Literal["no_listing"], Literal["shallow_listing"], Literal["deep_listing"] -] +LoadListingType = Union[Literal["no_listing"], Literal["shallow_listing"], Literal["deep_listing"]] def versionstring() -> str: @@ -306,9 +296,8 @@ def mark(d: Dict[str, str]) -> None: return dd -def get_listing( - fs_access: "StdFsAccess", rec: CWLObjectType, recursive: bool = True -) -> None: +def get_listing(fs_access: "StdFsAccess", rec: CWLObjectType, recursive: bool = True) -> None: + """Expand, recursively, any 'listing' fields in a Directory.""" if rec.get("class") != "Directory": finddirs: List[CWLObjectType] = [] visit_class(rec, ("Directory",), finddirs.append) @@ -446,9 +435,7 @@ def addLocation(d: Dict[str, Any]) -> None: raise ValidationException( "Anonymous file object must have 'contents' and 'basename' fields." ) - if d["class"] == "Directory" and ( - "listing" not in d or "basename" not in d - ): + if d["class"] == "Directory" and ("listing" not in d or "basename" not in d): raise ValidationException( "Anonymous directory object must have 'listing' and 'basename' fields." ) @@ -515,9 +502,8 @@ def __init__(self) -> None: self.requirements: List[CWLObjectType] = [] self.hints: List[CWLObjectType] = [] - def get_requirement( - self, feature: str - ) -> Tuple[Optional[CWLObjectType], Optional[bool]]: + def get_requirement(self, feature: str) -> Tuple[Optional[CWLObjectType], Optional[bool]]: + """Retrieve the named feature from the requirements field, or the hints field.""" for item in reversed(self.requirements): if item["class"] == feature: return (item, True) diff --git a/cwltool/validate_js.py b/cwltool/validate_js.py index 74fdf8a44..7a50e9b1c 100644 --- a/cwltool/validate_js.py +++ b/cwltool/validate_js.py @@ -159,14 +159,11 @@ def jshint_js( # NOTE: we need to assign to ob, as the expression {validateJS: validateJS} as an expression # is interpreted as a block with a label `validateJS` jshint_functions_text += ( - "\n" - + res2.read().decode("utf-8") - + "\nvar ob = {validateJS: validateJS}; ob" + "\n" + res2.read().decode("utf-8") + "\nvar ob = {validateJS: validateJS}; ob" ) returncode, stdout, stderr = exec_js_process( - "validateJS(%s)" - % json_dumps({"code": js_text, "options": options, "globals": globals}), + "validateJS(%s)" % json_dumps({"code": js_text, "options": options, "globals": globals}), timeout=eval_timeout, context=jshint_functions_text, container_engine=container_engine, @@ -205,9 +202,8 @@ def dump_jshint_error() -> None: return JSHintJSReturn(jshint_errors, jshint_json.get("globals", [])) -def print_js_hint_messages( - js_hint_messages: List[str], source_line: Optional[SourceLine] -) -> None: +def print_js_hint_messages(js_hint_messages: List[str], source_line: Optional[SourceLine]) -> None: + """Log the message from JSHint, using the line number.""" if source_line is not None: for js_hint_message in js_hint_messages: _logger.warning(source_line.makeError(js_hint_message)) @@ -220,7 +216,6 @@ def validate_js_expressions( container_engine: str = "docker", eval_timeout: float = 60, ) -> None: - if tool.get("requirements") is None: return debug = _logger.isEnabledFor(logging.DEBUG) diff --git a/cwltool/workflow.py b/cwltool/workflow.py index a387d3a80..a01af0d2d 100644 --- a/cwltool/workflow.py +++ b/cwltool/workflow.py @@ -41,9 +41,8 @@ from .workflow_job import WorkflowJob -def default_make_tool( - toolpath_object: CommentedMap, loadingContext: LoadingContext -) -> Process: +def default_make_tool(toolpath_object: CommentedMap, 
loadingContext: LoadingContext) -> Process: + """Instantiate the given CWL Process.""" if not isinstance(toolpath_object, MutableMapping): raise WorkflowException("Not a dict: '%s'" % toolpath_object) if "class" in toolpath_object: @@ -60,8 +59,7 @@ def default_make_tool( raise WorkflowException( "Missing or invalid 'class' field in " - "%s, expecting one of: CommandLineTool, ExpressionTool, Workflow" - % toolpath_object["id"] + "%s, expecting one of: CommandLineTool, ExpressionTool, Workflow" % toolpath_object["id"] ) @@ -106,9 +104,7 @@ def __init__( for index, step in enumerate(self.tool.get("steps", [])): try: self.steps.append( - self.make_workflow_step( - step, index, loadingContext, loadingContext.prov_obj - ) + self.make_workflow_step(step, index, loadingContext, loadingContext.prov_obj) ) except ValidationException as vexc: if _logger.isEnabledFor(logging.DEBUG): @@ -213,9 +209,7 @@ def __init__( loadingContext = loadingContext.copy() parent_requirements = copy.deepcopy(getdefault(loadingContext.requirements, [])) - loadingContext.requirements = copy.deepcopy( - toolpath_object.get("requirements", []) - ) + loadingContext.requirements = copy.deepcopy(toolpath_object.get("requirements", [])) assert loadingContext.requirements is not None # nosec for parent_req in parent_requirements: found_in_step = False @@ -223,17 +217,14 @@ def __init__( if parent_req["class"] == step_req["class"]: found_in_step = True break - if ( - not found_in_step - and parent_req.get("class") != "http://commonwl.org/cwltool#Loop" - ): + if not found_in_step and parent_req.get("class") != "http://commonwl.org/cwltool#Loop": loadingContext.requirements.append(parent_req) loadingContext.requirements.extend( cast( List[CWLObjectType], - get_overrides( - getdefault(loadingContext.overrides_list, []), self.id - ).get("requirements", []), + get_overrides(getdefault(loadingContext.overrides_list, []), self.id).get( + "requirements", [] + ), ) ) @@ -306,9 +297,7 @@ def __init__( else: step_entry_name = step_entry validation_errors.append( - SourceLine( - self.tool["out"], index, include_traceback=debug - ).makeError( + SourceLine(self.tool["out"], index, include_traceback=debug).makeError( "Workflow step output '%s' does not correspond to" % shortname(step_entry_name) ) @@ -323,9 +312,7 @@ def __init__( "', '".join( [ shortname(tool_entry["id"]) - for tool_entry in self.embedded_tool.tool[ - "outputs" - ] + for tool_entry in self.embedded_tool.tool["outputs"] ] ) ) @@ -371,8 +358,7 @@ def __init__( (feature, _) = self.get_requirement("ScatterFeatureRequirement") if not feature: raise WorkflowException( - "Workflow contains scatter but ScatterFeatureRequirement " - "not in requirements" + "Workflow contains scatter but ScatterFeatureRequirement " "not in requirements" ) inputparms = copy.deepcopy(self.tool["inputs"]) @@ -388,9 +374,7 @@ def __init__( inp_map = {i["id"]: i for i in inputparms} for inp in scatter: if inp not in inp_map: - SourceLine( - self.tool, "scatter", ValidationException, debug - ).makeError( + SourceLine(self.tool, "scatter", ValidationException, debug).makeError( "Scatter parameter '%s' does not correspond to " "an input parameter of this step, expecting '%s'" % ( diff --git a/cwltool/workflow_job.py b/cwltool/workflow_job.py index a56ec5ffa..baed43601 100644 --- a/cwltool/workflow_job.py +++ b/cwltool/workflow_job.py @@ -94,9 +94,8 @@ def __init__( self.output_callback = output_callback self.steps: List[Optional[JobsGeneratorType]] = [] - def receive_scatter_output( - self, index: int, jobout:
CWLObjectType, processStatus: str - ) -> None: + def receive_scatter_output(self, index: int, jobout: CWLObjectType, processStatus: str) -> None: + """Record the results of a scatter operation.""" for key, val in jobout.items(): self.dest[key][index] = val @@ -138,9 +137,10 @@ def parallel_steps( while rc.completed < rc.total: made_progress = False for index, step in enumerate(steps): - if getdefault( - runtimeContext.on_error, "stop" - ) == "stop" and rc.processStatus not in ("success", "skipped"): + if getdefault(runtimeContext.on_error, "stop") == "stop" and rc.processStatus not in ( + "success", + "skipped", + ): break if step is None: continue @@ -184,9 +184,7 @@ def nested_crossproduct_scatter( for index in range(0, jobl): sjob: Optional[CWLObjectType] = copy.copy(joborder) assert sjob is not None # nosec - sjob[scatter_key] = cast( - MutableMapping[int, CWLObjectType], joborder[scatter_key] - )[index] + sjob[scatter_key] = cast(MutableMapping[int, CWLObjectType], joborder[scatter_key])[index] if len(scatter_keys) == 1: if runtimeContext.postScatterEval is not None: @@ -212,9 +210,8 @@ def nested_crossproduct_scatter( return parallel_steps(steps, rc, runtimeContext) -def crossproduct_size( - joborder: CWLObjectType, scatter_keys: MutableSequence[str] -) -> int: +def crossproduct_size(joborder: CWLObjectType, scatter_keys: MutableSequence[str]) -> int: + """Compute the size of a cross product.""" scatter_key = scatter_keys[0] if len(scatter_keys) == 1: ssum = len(cast(Sized, joborder[scatter_key])) @@ -259,9 +256,7 @@ def _flat_crossproduct_scatter( for index in range(0, jobl): sjob: Optional[CWLObjectType] = copy.copy(joborder) assert sjob is not None # nosec - sjob[scatter_key] = cast( - MutableMapping[int, CWLObjectType], joborder[scatter_key] - )[index] + sjob[scatter_key] = cast(MutableMapping[int, CWLObjectType], joborder[scatter_key])[index] if len(scatter_keys) == 1: if runtimeContext.postScatterEval is not None: @@ -296,8 +291,7 @@ def dotproduct_scatter( jobl = len(cast(Sized, joborder[key])) elif jobl != len(cast(Sized, joborder[key])): raise WorkflowException( - "Length of input arrays must be equal when performing " - "dotproduct scatter." + "Length of input arrays must be equal when performing " "dotproduct scatter." 
) if jobl is None: raise Exception("Impossible codepath") @@ -508,7 +502,6 @@ def __init__(self, workflow: "Workflow", runtimeContext: RuntimeContext) -> None ) def do_output_callback(self, final_output_callback: OutputCallbackType) -> None: - supportsMultipleInput = bool( self.workflow.get_requirement("MultipleInputFeatureRequirement")[0] ) @@ -524,9 +517,7 @@ def do_output_callback(self, final_output_callback: OutputCallbackType) -> None: incomplete=True, ) except WorkflowException as err: - _logger.error( - "[%s] Cannot collect workflow output: %s", self.name, str(err) - ) + _logger.error("[%s] Cannot collect workflow output: %s", self.name, str(err)) self.processStatus = "permanentFail" if ( self.prov_obj @@ -543,9 +534,7 @@ def do_output_callback(self, final_output_callback: OutputCallbackType) -> None: ) prov_ids = self.prov_obj.finalize_prov_profile(self.name) # Tell parent to associate our provenance files with our wf run - self.parent_wf.activity_has_provenance( - self.prov_obj.workflow_run_uri, prov_ids - ) + self.parent_wf.activity_has_provenance(self.prov_obj.workflow_run_uri, prov_ids) _logger.info("[%s] completed %s", self.name, self.processStatus) if _logger.isEnabledFor(logging.DEBUG): @@ -563,21 +552,16 @@ def receive_output( jobout: CWLObjectType, processStatus: str, ) -> None: - for i in outputparms: if "id" in i: iid = cast(str, i["id"]) if iid in jobout: self.state[iid] = WorkflowStateItem(i, jobout[iid], processStatus) else: - _logger.error( - "[%s] Output is missing expected field %s", step.name, iid - ) + _logger.error("[%s] Output is missing expected field %s", step.name, iid) processStatus = "permanentFail" if _logger.isEnabledFor(logging.DEBUG): - _logger.debug( - "[%s] produced output %s", step.name, json_dumps(jobout, indent=4) - ) + _logger.debug("[%s] produced output %s", step.name, json_dumps(jobout, indent=4)) if processStatus not in ("success", "skipped"): if self.processStatus != "permanentFail": @@ -632,13 +616,9 @@ def try_make_job( self.receive_output, step, outputparms, final_output_callback ) - valueFrom = { - i["id"]: i["valueFrom"] for i in step.tool["inputs"] if "valueFrom" in i - } + valueFrom = {i["id"]: i["valueFrom"] for i in step.tool["inputs"] if "valueFrom" in i} - loadContents = { - i["id"] for i in step.tool["inputs"] if i.get("loadContents") - } + loadContents = {i["id"] for i in step.tool["inputs"] if i.get("loadContents")} if len(valueFrom) > 0 and not bool( self.workflow.get_requirement("StepInputExpressionRequirement")[0] @@ -658,13 +638,9 @@ def postScatterEval(io: CWLObjectType) -> Optional[CWLObjectType]: with fs_access.open(cast(str, val["location"]), "rb") as f: val["contents"] = content_limit_respected_read(f) - def valueFromFunc( - k: str, v: Optional[CWLOutputType] - ) -> Optional[CWLOutputType]: + def valueFromFunc(k: str, v: Optional[CWLOutputType]) -> Optional[CWLOutputType]: if k in valueFrom: - adjustDirObjs( - v, functools.partial(get_listing, fs_access, recursive=True) - ) + adjustDirObjs(v, functools.partial(get_listing, fs_access, recursive=True)) return expression.do_eval( valueFrom[k], @@ -727,9 +703,7 @@ def valueFromFunc( runtimeContext = runtimeContext.copy() runtimeContext.postScatterEval = postScatterEval - emptyscatter = [ - shortname(s) for s in scatter if len(cast(Sized, inputobj[s])) == 0 - ] + emptyscatter = [shortname(s) for s in scatter if len(cast(Sized, inputobj[s])) == 0] if emptyscatter: _logger.warning( "[job %s] Notice: scattering over empty input in " @@ -739,9 +713,7 @@ def valueFromFunc( ) if 
method == "dotproduct" or method is None: - jobs = dotproduct_scatter( - step, inputobj, scatter, callback, runtimeContext - ) + jobs = dotproduct_scatter(step, inputobj, scatter, callback, runtimeContext) elif method == "nested_crossproduct": jobs = nested_crossproduct_scatter( step, inputobj, scatter, callback, runtimeContext @@ -752,9 +724,7 @@ def valueFromFunc( ) else: if _logger.isEnabledFor(logging.DEBUG): - _logger.debug( - "[%s] job input %s", step.name, json_dumps(inputobj, indent=4) - ) + _logger.debug("[%s] job input %s", step.name, json_dumps(inputobj, indent=4)) inputobj = postScatterEval(inputobj) if inputobj is not None: @@ -814,13 +784,9 @@ def job( with SourceLine(self.tool["inputs"], index, WorkflowException, debug): inp_id = shortname(inp["id"]) if inp_id in joborder: - self.state[inp["id"]] = WorkflowStateItem( - inp, joborder[inp_id], "success" - ) + self.state[inp["id"]] = WorkflowStateItem(inp, joborder[inp_id], "success") elif "default" in inp: - self.state[inp["id"]] = WorkflowStateItem( - inp, inp["default"], "success" - ) + self.state[inp["id"]] = WorkflowStateItem(inp, inp["default"], "success") else: raise WorkflowException( "Input '%s' not in input object and does not have a " @@ -844,9 +810,7 @@ def job( if not step.submitted: try: - step.iterable = self.try_make_job( - step, output_callback, runtimeContext - ) + step.iterable = self.try_make_job(step, output_callback, runtimeContext) except WorkflowException as exc: _logger.error("[%s] Cannot make job: %s", step.name, str(exc)) _logger.debug("", exc_info=True) @@ -905,9 +869,7 @@ def _set_empty_output(self, loop_req: CWLObjectType) -> None: if "id" in i: iid = cast(str, i["id"]) if loop_req.get("outputMethod") == "all": - self.output_buffer[iid] = cast( - MutableSequence[Optional[CWLOutputType]], [] - ) + self.output_buffer[iid] = cast(MutableSequence[Optional[CWLOutputType]], []) else: self.output_buffer[iid] = None @@ -1033,7 +995,6 @@ def loop_callback( self.processStatus = processStatus if processStatus not in ("success", "skipped"): - _logger.warning( "[%s] Iteration %i completed %s", self.step.name, @@ -1087,9 +1048,7 @@ def loop_callback( for k, v in inputobj.items(): if k in valueFrom: - adjustDirObjs( - v, functools.partial(get_listing, fs_access, recursive=True) - ) + adjustDirObjs(v, functools.partial(get_listing, fs_access, recursive=True)) inputobj[k] = cast( CWLObjectType, expression.do_eval( diff --git a/lint-requirements.txt b/lint-requirements.txt index 2287ebe3b..280da7dcc 100644 --- a/lint-requirements.txt +++ b/lint-requirements.txt @@ -1,3 +1,3 @@ flake8-bugbear -black ~= 22.0 +black ~= 23.1 codespell diff --git a/mypy-stubs/graphviz/dot.pyi b/mypy-stubs/graphviz/dot.pyi index 01b81ea0f..45627097c 100644 --- a/mypy-stubs/graphviz/dot.pyi +++ b/mypy-stubs/graphviz/dot.pyi @@ -37,7 +37,7 @@ class Dot(files.File): name, label: Optional[Any] = ..., _attributes: Optional[Any] = ..., - **attrs + **attrs, ): ... def edge( self, @@ -45,12 +45,10 @@ class Dot(files.File): head_name, label: Optional[Any] = ..., _attributes: Optional[Any] = ..., - **attrs + **attrs, ): ... def edges(self, tail_head_iter): ... - def attr( - self, kw: Optional[Any] = ..., _attributes: Optional[Any] = ..., **attrs - ): ... + def attr(self, kw: Optional[Any] = ..., _attributes: Optional[Any] = ..., **attrs): ... 
def subgraph( self, graph: Optional[Any] = ..., diff --git a/mypy-stubs/graphviz/lang.pyi b/mypy-stubs/graphviz/lang.pyi index 9613c5c50..28f163ea9 100644 --- a/mypy-stubs/graphviz/lang.pyi +++ b/mypy-stubs/graphviz/lang.pyi @@ -4,9 +4,7 @@ from typing import Any, Optional -def quote( - identifier, html: Any = ..., valid_id: Any = ..., dot_keywords: Any = ... -): ... +def quote(identifier, html: Any = ..., valid_id: Any = ..., dot_keywords: Any = ...): ... def quote_edge(identifier): ... def a_list( label: Optional[Any] = ..., diff --git a/mypy-stubs/mistune.pyi b/mypy-stubs/mistune.pyi index b4fe10213..3778c9195 100644 --- a/mypy-stubs/mistune.pyi +++ b/mypy-stubs/mistune.pyi @@ -168,7 +168,7 @@ class Markdown: renderer: Optional[Renderer] = ..., inline: Optional[InlineLexer] = ..., block: Optional[BlockLexer] = ..., - **kwargs: Any + **kwargs: Any, ) -> None: ... def __call__(self, text: str) -> str: ... def render(self, text: str) -> str: ... diff --git a/mypy-stubs/networkx/algorithms/approximation/connectivity.pyi b/mypy-stubs/networkx/algorithms/approximation/connectivity.pyi index e7723ca0f..d10a99c6e 100644 --- a/mypy-stubs/networkx/algorithms/approximation/connectivity.pyi +++ b/mypy-stubs/networkx/algorithms/approximation/connectivity.pyi @@ -6,6 +6,4 @@ from typing import Any, Optional def local_node_connectivity(G, source, target, cutoff: Optional[Any] = ...): ... def node_connectivity(G, s: Optional[Any] = ..., t: Optional[Any] = ...): ... -def all_pairs_node_connectivity( - G, nbunch: Optional[Any] = ..., cutoff: Optional[Any] = ... -): ... +def all_pairs_node_connectivity(G, nbunch: Optional[Any] = ..., cutoff: Optional[Any] = ...): ... diff --git a/mypy-stubs/networkx/algorithms/assortativity/mixing.pyi b/mypy-stubs/networkx/algorithms/assortativity/mixing.pyi index 2b938b7ba..ba6782513 100644 --- a/mypy-stubs/networkx/algorithms/assortativity/mixing.pyi +++ b/mypy-stubs/networkx/algorithms/assortativity/mixing.pyi @@ -4,9 +4,7 @@ from typing import Any, Optional -def attribute_mixing_dict( - G, attribute, nodes: Optional[Any] = ..., normalized: bool = ... -): ... +def attribute_mixing_dict(G, attribute, nodes: Optional[Any] = ..., normalized: bool = ...): ... def attribute_mixing_matrix( G, attribute, @@ -30,7 +28,5 @@ def degree_mixing_matrix( nodes: Optional[Any] = ..., normalized: bool = ..., ): ... -def numeric_mixing_matrix( - G, attribute, nodes: Optional[Any] = ..., normalized: bool = ... -): ... +def numeric_mixing_matrix(G, attribute, nodes: Optional[Any] = ..., normalized: bool = ...): ... def mixing_dict(xy, normalized: bool = ...): ... diff --git a/mypy-stubs/networkx/algorithms/bipartite/matrix.pyi b/mypy-stubs/networkx/algorithms/bipartite/matrix.pyi index 56588c7b1..8d35779d7 100644 --- a/mypy-stubs/networkx/algorithms/bipartite/matrix.pyi +++ b/mypy-stubs/networkx/algorithms/bipartite/matrix.pyi @@ -12,6 +12,4 @@ def biadjacency_matrix( weight: str = ..., format: str = ..., ): ... -def from_biadjacency_matrix( - A, create_using: Optional[Any] = ..., edge_attribute: str = ... -): ... +def from_biadjacency_matrix(A, create_using: Optional[Any] = ..., edge_attribute: str = ...): ... diff --git a/mypy-stubs/networkx/algorithms/bipartite/projection.pyi b/mypy-stubs/networkx/algorithms/bipartite/projection.pyi index 9f7825752..f5b72a9f0 100644 --- a/mypy-stubs/networkx/algorithms/bipartite/projection.pyi +++ b/mypy-stubs/networkx/algorithms/bipartite/projection.pyi @@ -8,7 +8,5 @@ def projected_graph(B, nodes, multigraph: bool = ...): ... 
def weighted_projected_graph(B, nodes, ratio: bool = ...): ... def collaboration_weighted_projected_graph(B, nodes): ... def overlap_weighted_projected_graph(B, nodes, jaccard: bool = ...): ... -def generic_weighted_projected_graph( - B, nodes, weight_function: Optional[Any] = ... -): ... +def generic_weighted_projected_graph(B, nodes, weight_function: Optional[Any] = ...): ... def project(B, nodes, create_using: Optional[Any] = ...): ... diff --git a/mypy-stubs/networkx/algorithms/centrality/flow_matrix.pyi b/mypy-stubs/networkx/algorithms/centrality/flow_matrix.pyi index 1a6009919..e84fcadfb 100644 --- a/mypy-stubs/networkx/algorithms/centrality/flow_matrix.pyi +++ b/mypy-stubs/networkx/algorithms/centrality/flow_matrix.pyi @@ -4,9 +4,7 @@ from typing import Any, Optional -def flow_matrix_row( - G, weight: Optional[Any] = ..., dtype: Any = ..., solver: str = ... -): ... +def flow_matrix_row(G, weight: Optional[Any] = ..., dtype: Any = ..., solver: str = ...): ... class InverseLaplacian: dtype: Any = ... @@ -14,9 +12,7 @@ class InverseLaplacian: w: Any = ... C: Any = ... L1: Any = ... - def __init__( - self, L, width: Optional[Any] = ..., dtype: Optional[Any] = ... - ) -> None: ... + def __init__(self, L, width: Optional[Any] = ..., dtype: Optional[Any] = ...) -> None: ... def init_solver(self, L): ... def solve(self, r): ... def solve_inverse(self, r): ... diff --git a/mypy-stubs/networkx/algorithms/centrality/harmonic.pyi b/mypy-stubs/networkx/algorithms/centrality/harmonic.pyi index 521de798a..723e2c919 100644 --- a/mypy-stubs/networkx/algorithms/centrality/harmonic.pyi +++ b/mypy-stubs/networkx/algorithms/centrality/harmonic.pyi @@ -4,6 +4,4 @@ from typing import Any, Optional -def harmonic_centrality( - G, nbunch: Optional[Any] = ..., distance: Optional[Any] = ... -): ... +def harmonic_centrality(G, nbunch: Optional[Any] = ..., distance: Optional[Any] = ...): ... diff --git a/mypy-stubs/networkx/algorithms/centrality/reaching.pyi b/mypy-stubs/networkx/algorithms/centrality/reaching.pyi index c6f4cd02f..7a811184a 100644 --- a/mypy-stubs/networkx/algorithms/centrality/reaching.pyi +++ b/mypy-stubs/networkx/algorithms/centrality/reaching.pyi @@ -4,9 +4,7 @@ from typing import Any, Optional -def global_reaching_centrality( - G, weight: Optional[Any] = ..., normalized: bool = ... -): ... +def global_reaching_centrality(G, weight: Optional[Any] = ..., normalized: bool = ...): ... def local_reaching_centrality( G, v, diff --git a/mypy-stubs/networkx/algorithms/clique.pyi b/mypy-stubs/networkx/algorithms/clique.pyi index ef1a7faa6..110fee585 100644 --- a/mypy-stubs/networkx/algorithms/clique.pyi +++ b/mypy-stubs/networkx/algorithms/clique.pyi @@ -18,6 +18,4 @@ def graph_clique_number(G, cliques: Optional[Any] = ...): ... def graph_number_of_cliques(G, cliques: Optional[Any] = ...): ... def node_clique_number(G, nodes: Optional[Any] = ..., cliques: Optional[Any] = ...): ... def number_of_cliques(G, nodes: Optional[Any] = ..., cliques: Optional[Any] = ...): ... -def cliques_containing_node( - G, nodes: Optional[Any] = ..., cliques: Optional[Any] = ... -): ... +def cliques_containing_node(G, nodes: Optional[Any] = ..., cliques: Optional[Any] = ...): ... 
diff --git a/mypy-stubs/networkx/algorithms/connectivity/connectivity.pyi b/mypy-stubs/networkx/algorithms/connectivity/connectivity.pyi index 9d8780294..99816454d 100644 --- a/mypy-stubs/networkx/algorithms/connectivity/connectivity.pyi +++ b/mypy-stubs/networkx/algorithms/connectivity/connectivity.pyi @@ -21,9 +21,7 @@ def node_connectivity( G, s: Optional[Any] = ..., t: Optional[Any] = ..., flow_func: Optional[Any] = ... ): ... def average_node_connectivity(G, flow_func: Optional[Any] = ...): ... -def all_pairs_node_connectivity( - G, nbunch: Optional[Any] = ..., flow_func: Optional[Any] = ... -): ... +def all_pairs_node_connectivity(G, nbunch: Optional[Any] = ..., flow_func: Optional[Any] = ...): ... def local_edge_connectivity( G, s, diff --git a/mypy-stubs/networkx/algorithms/flow/maxflow.pyi b/mypy-stubs/networkx/algorithms/flow/maxflow.pyi index e0ebab1f7..6e4b10a81 100644 --- a/mypy-stubs/networkx/algorithms/flow/maxflow.pyi +++ b/mypy-stubs/networkx/algorithms/flow/maxflow.pyi @@ -8,15 +8,11 @@ from .preflowpush import preflow_push default_flow_func = preflow_push -def maximum_flow( - flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs -): ... +def maximum_flow(flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs): ... def maximum_flow_value( flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs ): ... -def minimum_cut( - flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs -): ... +def minimum_cut(flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs): ... def minimum_cut_value( flowG, _s, _t, capacity: str = ..., flow_func: Optional[Any] = ..., **kwargs ): ... diff --git a/mypy-stubs/networkx/algorithms/flow/mincost.pyi b/mypy-stubs/networkx/algorithms/flow/mincost.pyi index 93c4e6904..8de9f4835 100644 --- a/mypy-stubs/networkx/algorithms/flow/mincost.pyi +++ b/mypy-stubs/networkx/algorithms/flow/mincost.pyi @@ -2,9 +2,7 @@ # # NOTE: This dynamically typed stub was automatically generated by stubgen. -def min_cost_flow_cost( - G, demand: str = ..., capacity: str = ..., weight: str = ... -): ... +def min_cost_flow_cost(G, demand: str = ..., capacity: str = ..., weight: str = ...): ... def min_cost_flow(G, demand: str = ..., capacity: str = ..., weight: str = ...): ... def cost_of_flow(G, flowDict, weight: str = ...): ... def max_flow_min_cost(G, s, t, capacity: str = ..., weight: str = ...): ... diff --git a/mypy-stubs/networkx/algorithms/hybrid.pyi b/mypy-stubs/networkx/algorithms/hybrid.pyi index 020479b17..e6a6f14a6 100644 --- a/mypy-stubs/networkx/algorithms/hybrid.pyi +++ b/mypy-stubs/networkx/algorithms/hybrid.pyi @@ -2,7 +2,5 @@ # # NOTE: This dynamically typed stub was automatically generated by stubgen. -def kl_connected_subgraph( - G, k, l, low_memory: bool = ..., same_as_graph: bool = ... -): ... +def kl_connected_subgraph(G, k, l, low_memory: bool = ..., same_as_graph: bool = ...): ... def is_kl_connected(G, k, l, low_memory: bool = ...): ... diff --git a/mypy-stubs/networkx/algorithms/isomorphism/isomorph.pyi b/mypy-stubs/networkx/algorithms/isomorphism/isomorph.pyi index 25e42d410..a706dce5b 100644 --- a/mypy-stubs/networkx/algorithms/isomorphism/isomorph.pyi +++ b/mypy-stubs/networkx/algorithms/isomorphism/isomorph.pyi @@ -16,6 +16,4 @@ def faster_could_be_isomorphic(G1, G2): ... faster_graph_could_be_isomorphic = faster_could_be_isomorphic -def is_isomorphic( - G1, G2, node_match: Optional[Any] = ..., edge_match: Optional[Any] = ... -): ... 
+def is_isomorphic(G1, G2, node_match: Optional[Any] = ..., edge_match: Optional[Any] = ...): ... diff --git a/mypy-stubs/networkx/algorithms/isomorphism/isomorphvf2.pyi b/mypy-stubs/networkx/algorithms/isomorphism/isomorphvf2.pyi index 30a2d51e1..caec90325 100644 --- a/mypy-stubs/networkx/algorithms/isomorphism/isomorphvf2.pyi +++ b/mypy-stubs/networkx/algorithms/isomorphism/isomorphvf2.pyi @@ -48,9 +48,7 @@ class GMState: G1_node: Any = ... G2_node: Any = ... depth: Any = ... - def __init__( - self, GM, G1_node: Optional[Any] = ..., G2_node: Optional[Any] = ... - ) -> None: ... + def __init__(self, GM, G1_node: Optional[Any] = ..., G2_node: Optional[Any] = ...) -> None: ... def restore(self): ... class DiGMState: @@ -58,7 +56,5 @@ class DiGMState: G1_node: Any = ... G2_node: Any = ... depth: Any = ... - def __init__( - self, GM, G1_node: Optional[Any] = ..., G2_node: Optional[Any] = ... - ) -> None: ... + def __init__(self, GM, G1_node: Optional[Any] = ..., G2_node: Optional[Any] = ...) -> None: ... def restore(self): ... diff --git a/mypy-stubs/networkx/algorithms/link_prediction.pyi b/mypy-stubs/networkx/algorithms/link_prediction.pyi index 1d9a5813b..75afac061 100644 --- a/mypy-stubs/networkx/algorithms/link_prediction.pyi +++ b/mypy-stubs/networkx/algorithms/link_prediction.pyi @@ -9,9 +9,7 @@ def jaccard_coefficient(G, ebunch: Optional[Any] = ...): ... def adamic_adar_index(G, ebunch: Optional[Any] = ...): ... def preferential_attachment(G, ebunch: Optional[Any] = ...): ... def cn_soundarajan_hopcroft(G, ebunch: Optional[Any] = ..., community: str = ...): ... -def ra_index_soundarajan_hopcroft( - G, ebunch: Optional[Any] = ..., community: str = ... -): ... +def ra_index_soundarajan_hopcroft(G, ebunch: Optional[Any] = ..., community: str = ...): ... def within_inter_cluster( G, ebunch: Optional[Any] = ..., delta: float = ..., community: str = ... ): ... diff --git a/mypy-stubs/networkx/algorithms/shortest_paths/astar.pyi b/mypy-stubs/networkx/algorithms/shortest_paths/astar.pyi index abe9be2dc..37b2fdd6a 100644 --- a/mypy-stubs/networkx/algorithms/shortest_paths/astar.pyi +++ b/mypy-stubs/networkx/algorithms/shortest_paths/astar.pyi @@ -4,9 +4,5 @@ from typing import Any, Optional -def astar_path( - G, source, target, heuristic: Optional[Any] = ..., weight: str = ... -): ... -def astar_path_length( - G, source, target, heuristic: Optional[Any] = ..., weight: str = ... -): ... +def astar_path(G, source, target, heuristic: Optional[Any] = ..., weight: str = ...): ... +def astar_path_length(G, source, target, heuristic: Optional[Any] = ..., weight: str = ...): ... diff --git a/mypy-stubs/networkx/algorithms/shortest_paths/weighted.pyi b/mypy-stubs/networkx/algorithms/shortest_paths/weighted.pyi index 52e388360..7f198179b 100644 --- a/mypy-stubs/networkx/algorithms/shortest_paths/weighted.pyi +++ b/mypy-stubs/networkx/algorithms/shortest_paths/weighted.pyi @@ -6,9 +6,7 @@ from typing import Any, Optional def dijkstra_path(G, source, target, weight: str = ...): ... def dijkstra_path_length(G, source, target, weight: str = ...): ... -def single_source_dijkstra_path( - G, source, cutoff: Optional[Any] = ..., weight: str = ... -): ... +def single_source_dijkstra_path(G, source, cutoff: Optional[Any] = ..., weight: str = ...): ... def single_source_dijkstra_path_length( G, source, cutoff: Optional[Any] = ..., weight: str = ... ): ... @@ -19,9 +17,7 @@ def single_source_dijkstra( cutoff: Optional[Any] = ..., weight: str = ..., ): ... 
-def multi_source_dijkstra_path( - G, sources, cutoff: Optional[Any] = ..., weight: str = ... -): ... +def multi_source_dijkstra_path(G, sources, cutoff: Optional[Any] = ..., weight: str = ...): ... def multi_source_dijkstra_path_length( G, sources, cutoff: Optional[Any] = ..., weight: str = ... ): ... @@ -36,9 +32,7 @@ def dijkstra_predecessor_and_distance( G, source, cutoff: Optional[Any] = ..., weight: str = ... ): ... def all_pairs_dijkstra(G, cutoff: Optional[Any] = ..., weight: str = ...): ... -def all_pairs_dijkstra_path_length( - G, cutoff: Optional[Any] = ..., weight: str = ... -): ... +def all_pairs_dijkstra_path_length(G, cutoff: Optional[Any] = ..., weight: str = ...): ... def all_pairs_dijkstra_path(G, cutoff: Optional[Any] = ..., weight: str = ...): ... def bellman_ford_predecessor_and_distance( G, @@ -49,9 +43,7 @@ def bellman_ford_predecessor_and_distance( ): ... def bellman_ford_path(G, source, target, weight: str = ...): ... def bellman_ford_path_length(G, source, target, weight: str = ...): ... -def single_source_bellman_ford_path( - G, source, cutoff: Optional[Any] = ..., weight: str = ... -): ... +def single_source_bellman_ford_path(G, source, cutoff: Optional[Any] = ..., weight: str = ...): ... def single_source_bellman_ford_path_length( G, source, cutoff: Optional[Any] = ..., weight: str = ... ): ... @@ -62,9 +54,7 @@ def single_source_bellman_ford( cutoff: Optional[Any] = ..., weight: str = ..., ): ... -def all_pairs_bellman_ford_path_length( - G, cutoff: Optional[Any] = ..., weight: str = ... -): ... +def all_pairs_bellman_ford_path_length(G, cutoff: Optional[Any] = ..., weight: str = ...): ... def all_pairs_bellman_ford_path(G, cutoff: Optional[Any] = ..., weight: str = ...): ... def goldberg_radzik(G, source, weight: str = ...): ... def negative_edge_cycle(G, weight: str = ...): ... diff --git a/mypy-stubs/networkx/algorithms/traversal/depth_first_search.pyi b/mypy-stubs/networkx/algorithms/traversal/depth_first_search.pyi index 554accd1c..dc8fecf60 100644 --- a/mypy-stubs/networkx/algorithms/traversal/depth_first_search.pyi +++ b/mypy-stubs/networkx/algorithms/traversal/depth_first_search.pyi @@ -6,18 +6,8 @@ from typing import Any, Optional def dfs_edges(G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...): ... def dfs_tree(G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...): ... -def dfs_predecessors( - G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ... -): ... -def dfs_successors( - G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ... -): ... -def dfs_postorder_nodes( - G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ... -): ... -def dfs_preorder_nodes( - G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ... -): ... -def dfs_labeled_edges( - G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ... -): ... +def dfs_predecessors(G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...): ... +def dfs_successors(G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...): ... +def dfs_postorder_nodes(G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...): ... +def dfs_preorder_nodes(G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...): ... +def dfs_labeled_edges(G, source: Optional[Any] = ..., depth_limit: Optional[Any] = ...): ... 
diff --git a/mypy-stubs/networkx/algorithms/tree/mst.pyi b/mypy-stubs/networkx/algorithms/tree/mst.pyi index 13a819bc1..7846d8aac 100644 --- a/mypy-stubs/networkx/algorithms/tree/mst.pyi +++ b/mypy-stubs/networkx/algorithms/tree/mst.pyi @@ -18,9 +18,5 @@ def maximum_spanning_edges( data: bool = ..., ignore_nan: bool = ..., ): ... -def minimum_spanning_tree( - G, weight: str = ..., algorithm: str = ..., ignore_nan: bool = ... -): ... -def maximum_spanning_tree( - G, weight: str = ..., algorithm: str = ..., ignore_nan: bool = ... -): ... +def minimum_spanning_tree(G, weight: str = ..., algorithm: str = ..., ignore_nan: bool = ...): ... +def maximum_spanning_tree(G, weight: str = ..., algorithm: str = ..., ignore_nan: bool = ...): ... diff --git a/mypy-stubs/networkx/classes/function.pyi b/mypy-stubs/networkx/classes/function.pyi index 4de47f5f1..d93da4395 100644 --- a/mypy-stubs/networkx/classes/function.pyi +++ b/mypy-stubs/networkx/classes/function.pyi @@ -39,7 +39,5 @@ def is_weighted(G, edge: Optional[Any] = ..., weight: str = ...): ... def is_negatively_weighted(G, edge: Optional[Any] = ..., weight: str = ...): ... def is_empty(G): ... def nodes_with_selfloops(G): ... -def selfloop_edges( - G, data: bool = ..., keys: bool = ..., default: Optional[Any] = ... -): ... +def selfloop_edges(G, data: bool = ..., keys: bool = ..., default: Optional[Any] = ...): ... def number_of_selfloops(G): ... diff --git a/mypy-stubs/networkx/classes/graph.pyi b/mypy-stubs/networkx/classes/graph.pyi index 302b25351..9692b5110 100644 --- a/mypy-stubs/networkx/classes/graph.pyi +++ b/mypy-stubs/networkx/classes/graph.pyi @@ -34,9 +34,7 @@ class Graph: def add_star(self, nodes, **attr): ... def nodes_with_selfloops(self): ... def number_of_selfloops(self): ... - def selfloop_edges( - self, data: bool = ..., keys: bool = ..., default: Optional[Any] = ... - ): ... + def selfloop_edges(self, data: bool = ..., keys: bool = ..., default: Optional[Any] = ...): ... def number_of_nodes(self): ... def order(self): ... def has_node(self, n): ... diff --git a/mypy-stubs/networkx/classes/graphviews.pyi b/mypy-stubs/networkx/classes/graphviews.pyi index 064a80f44..3b56bc4b0 100644 --- a/mypy-stubs/networkx/classes/graphviews.pyi +++ b/mypy-stubs/networkx/classes/graphviews.pyi @@ -10,30 +10,22 @@ from networkx.classes.coreviews import ReadOnlyGraph class SubGraph(ReadOnlyGraph, Graph): root_graph: Any = ... graph: Any = ... - def __init__( - self, graph, filter_node: Any = ..., filter_edge: Any = ... - ) -> None: ... + def __init__(self, graph, filter_node: Any = ..., filter_edge: Any = ...) -> None: ... class SubDiGraph(ReadOnlyGraph, DiGraph): root_graph: Any = ... graph: Any = ... - def __init__( - self, graph, filter_node: Any = ..., filter_edge: Any = ... - ) -> None: ... + def __init__(self, graph, filter_node: Any = ..., filter_edge: Any = ...) -> None: ... class SubMultiGraph(ReadOnlyGraph, MultiGraph): root_graph: Any = ... graph: Any = ... - def __init__( - self, graph, filter_node: Any = ..., filter_edge: Any = ... - ) -> None: ... + def __init__(self, graph, filter_node: Any = ..., filter_edge: Any = ...) -> None: ... class SubMultiDiGraph(ReadOnlyGraph, MultiDiGraph): root_graph: Any = ... graph: Any = ... - def __init__( - self, graph, filter_node: Any = ..., filter_edge: Any = ... - ) -> None: ... + def __init__(self, graph, filter_node: Any = ..., filter_edge: Any = ...) -> None: ... class ReverseView(ReadOnlyGraph, DiGraph): root_graph: Any = ... 
diff --git a/mypy-stubs/networkx/classes/multigraph.pyi b/mypy-stubs/networkx/classes/multigraph.pyi index 18c4329da..8e6512bb3 100644 --- a/mypy-stubs/networkx/classes/multigraph.pyi +++ b/mypy-stubs/networkx/classes/multigraph.pyi @@ -19,9 +19,7 @@ class MultiGraph(Graph): def has_edge(self, u, v, key: Optional[Any] = ...): ... @property def edges(self): ... - def get_edge_data( - self, u, v, key: Optional[Any] = ..., default: Optional[Any] = ... - ): ... + def get_edge_data(self, u, v, key: Optional[Any] = ..., default: Optional[Any] = ...): ... @property def degree(self): ... def is_multigraph(self): ... diff --git a/mypy-stubs/networkx/classes/reportviews.pyi b/mypy-stubs/networkx/classes/reportviews.pyi index 41d409d77..66a687843 100644 --- a/mypy-stubs/networkx/classes/reportviews.pyi +++ b/mypy-stubs/networkx/classes/reportviews.pyi @@ -15,18 +15,14 @@ class NodeView(Mapping, Set): def data(self, data: bool = ..., default: Optional[Any] = ...): ... class NodeDataView(Set): - def __init__( - self, nodedict, data: bool = ..., default: Optional[Any] = ... - ) -> None: ... + def __init__(self, nodedict, data: bool = ..., default: Optional[Any] = ...) -> None: ... def __len__(self): ... def __iter__(self): ... def __contains__(self, n): ... def __getitem__(self, n): ... class DiDegreeView: - def __init__( - self, G, nbunch: Optional[Any] = ..., weight: Optional[Any] = ... - ) -> None: ... + def __init__(self, G, nbunch: Optional[Any] = ..., weight: Optional[Any] = ...) -> None: ... def __call__(self, nbunch: Optional[Any] = ..., weight: Optional[Any] = ...): ... def __getitem__(self, n): ... def __iter__(self): ... diff --git a/mypy-stubs/networkx/convert.pyi b/mypy-stubs/networkx/convert.pyi index ac3868c1b..cc98a3e9d 100644 --- a/mypy-stubs/networkx/convert.pyi +++ b/mypy-stubs/networkx/convert.pyi @@ -4,16 +4,10 @@ from typing import Any, Optional -def to_networkx_graph( - data, create_using: Optional[Any] = ..., multigraph_input: bool = ... -): ... +def to_networkx_graph(data, create_using: Optional[Any] = ..., multigraph_input: bool = ...): ... def to_dict_of_lists(G, nodelist: Optional[Any] = ...): ... def from_dict_of_lists(d, create_using: Optional[Any] = ...): ... -def to_dict_of_dicts( - G, nodelist: Optional[Any] = ..., edge_data: Optional[Any] = ... -): ... -def from_dict_of_dicts( - d, create_using: Optional[Any] = ..., multigraph_input: bool = ... -): ... +def to_dict_of_dicts(G, nodelist: Optional[Any] = ..., edge_data: Optional[Any] = ...): ... +def from_dict_of_dicts(d, create_using: Optional[Any] = ..., multigraph_input: bool = ...): ... def to_edgelist(G, nodelist: Optional[Any] = ...): ... def from_edgelist(edgelist, create_using: Optional[Any] = ...): ... diff --git a/mypy-stubs/networkx/convert_matrix.pyi b/mypy-stubs/networkx/convert_matrix.pyi index c991bddb6..36d42ebaf 100644 --- a/mypy-stubs/networkx/convert_matrix.pyi +++ b/mypy-stubs/networkx/convert_matrix.pyi @@ -38,9 +38,7 @@ def to_numpy_matrix( weight: str = ..., nonedge: float = ..., ): ... -def from_numpy_matrix( - A, parallel_edges: bool = ..., create_using: Optional[Any] = ... -): ... +def from_numpy_matrix(A, parallel_edges: bool = ..., create_using: Optional[Any] = ...): ... def to_numpy_recarray( G, nodelist: Optional[Any] = ..., @@ -69,6 +67,4 @@ def to_numpy_array( weight: str = ..., nonedge: float = ..., ): ... -def from_numpy_array( - A, parallel_edges: bool = ..., create_using: Optional[Any] = ... -): ... 
+def from_numpy_array(A, parallel_edges: bool = ..., create_using: Optional[Any] = ...): ... diff --git a/mypy-stubs/networkx/drawing/layout.pyi b/mypy-stubs/networkx/drawing/layout.pyi index 63863ff00..fb70b3a34 100644 --- a/mypy-stubs/networkx/drawing/layout.pyi +++ b/mypy-stubs/networkx/drawing/layout.pyi @@ -7,9 +7,7 @@ from typing import Any, Optional def random_layout( G, center: Optional[Any] = ..., dim: int = ..., random_state: Optional[Any] = ... ): ... -def circular_layout( - G, scale: int = ..., center: Optional[Any] = ..., dim: int = ... -): ... +def circular_layout(G, scale: int = ..., center: Optional[Any] = ..., dim: int = ...): ... def shell_layout( G, nlist: Optional[Any] = ..., diff --git a/mypy-stubs/networkx/drawing/nx_agraph.pyi b/mypy-stubs/networkx/drawing/nx_agraph.pyi index 2e9900dd1..17206d717 100644 --- a/mypy-stubs/networkx/drawing/nx_agraph.pyi +++ b/mypy-stubs/networkx/drawing/nx_agraph.pyi @@ -9,9 +9,7 @@ def to_agraph(N): ... def write_dot(G, path): ... def read_dot(path): ... def graphviz_layout(G, prog: str = ..., root: Optional[Any] = ..., args: str = ...): ... -def pygraphviz_layout( - G, prog: str = ..., root: Optional[Any] = ..., args: str = ... -): ... +def pygraphviz_layout(G, prog: str = ..., root: Optional[Any] = ..., args: str = ...): ... def view_pygraphviz( G, edgelabel: Optional[Any] = ..., diff --git a/mypy-stubs/networkx/drawing/nx_pylab.pyi b/mypy-stubs/networkx/drawing/nx_pylab.pyi index 72bf69163..8142f3675 100644 --- a/mypy-stubs/networkx/drawing/nx_pylab.pyi +++ b/mypy-stubs/networkx/drawing/nx_pylab.pyi @@ -23,7 +23,7 @@ def draw_networkx_nodes( linewidths: Optional[Any] = ..., edgecolors: Optional[Any] = ..., label: Optional[Any] = ..., - **kwds + **kwds, ): ... def draw_networkx_edges( G, @@ -44,7 +44,7 @@ def draw_networkx_edges( node_size: int = ..., nodelist: Optional[Any] = ..., node_shape: str = ..., - **kwds + **kwds, ): ... def draw_networkx_labels( G, @@ -57,7 +57,7 @@ def draw_networkx_labels( alpha: float = ..., bbox: Optional[Any] = ..., ax: Optional[Any] = ..., - **kwds + **kwds, ): ... def draw_networkx_edge_labels( G, @@ -72,7 +72,7 @@ def draw_networkx_edge_labels( bbox: Optional[Any] = ..., ax: Optional[Any] = ..., rotate: bool = ..., - **kwds + **kwds, ): ... def draw_circular(G, **kwargs): ... def draw_kamada_kawai(G, **kwargs): ... diff --git a/mypy-stubs/networkx/generators/community.pyi b/mypy-stubs/networkx/generators/community.pyi index 71dd2e603..fd79aedba 100644 --- a/mypy-stubs/networkx/generators/community.pyi +++ b/mypy-stubs/networkx/generators/community.pyi @@ -7,12 +7,8 @@ from typing import Any, Optional def caveman_graph(l, k): ... def connected_caveman_graph(l, k): ... def relaxed_caveman_graph(l, k, p, seed: Optional[Any] = ...): ... -def random_partition_graph( - sizes, p_in, p_out, seed: Optional[Any] = ..., directed: bool = ... -): ... -def planted_partition_graph( - l, k, p_in, p_out, seed: Optional[Any] = ..., directed: bool = ... -): ... +def random_partition_graph(sizes, p_in, p_out, seed: Optional[Any] = ..., directed: bool = ...): ... +def planted_partition_graph(l, k, p_in, p_out, seed: Optional[Any] = ..., directed: bool = ...): ... def gaussian_random_partition_graph( n, s, v, p_in, p_out, directed: bool = ..., seed: Optional[Any] = ... ): ... 
diff --git a/mypy-stubs/networkx/generators/degree_seq.pyi b/mypy-stubs/networkx/generators/degree_seq.pyi index 85d846470..44d8f79b7 100644 --- a/mypy-stubs/networkx/generators/degree_seq.pyi +++ b/mypy-stubs/networkx/generators/degree_seq.pyi @@ -19,9 +19,7 @@ def directed_havel_hakimi_graph( in_deg_sequence, out_deg_sequence, create_using: Optional[Any] = ... ): ... def degree_sequence_tree(deg_sequence, create_using: Optional[Any] = ...): ... -def random_degree_sequence_graph( - sequence, seed: Optional[Any] = ..., tries: int = ... -): ... +def random_degree_sequence_graph(sequence, seed: Optional[Any] = ..., tries: int = ...): ... class DegreeSequenceRandomGraph: degree: Any = ... diff --git a/mypy-stubs/networkx/generators/directed.pyi b/mypy-stubs/networkx/generators/directed.pyi index 41015ef79..0ec9edff1 100644 --- a/mypy-stubs/networkx/generators/directed.pyi +++ b/mypy-stubs/networkx/generators/directed.pyi @@ -22,6 +22,4 @@ def scale_free_graph( create_using: Optional[Any] = ..., seed: Optional[Any] = ..., ): ... -def random_k_out_graph( - n, k, alpha, self_loops: bool = ..., seed: Optional[Any] = ... -): ... +def random_k_out_graph(n, k, alpha, self_loops: bool = ..., seed: Optional[Any] = ...): ... diff --git a/mypy-stubs/networkx/generators/geometric.pyi b/mypy-stubs/networkx/generators/geometric.pyi index 051b8e5be..5d25162da 100644 --- a/mypy-stubs/networkx/generators/geometric.pyi +++ b/mypy-stubs/networkx/generators/geometric.pyi @@ -4,9 +4,7 @@ from typing import Any, Optional -def random_geometric_graph( - n, radius, dim: int = ..., pos: Optional[Any] = ..., p: int = ... -): ... +def random_geometric_graph(n, radius, dim: int = ..., pos: Optional[Any] = ..., p: int = ...): ... def soft_random_geometric_graph( n, radius, diff --git a/mypy-stubs/networkx/generators/random_graphs.pyi b/mypy-stubs/networkx/generators/random_graphs.pyi index 16fc313df..ebd3f1357 100644 --- a/mypy-stubs/networkx/generators/random_graphs.pyi +++ b/mypy-stubs/networkx/generators/random_graphs.pyi @@ -14,18 +14,14 @@ def dense_gnm_random_graph(n, m, seed: Optional[Any] = ...): ... def gnm_random_graph(n, m, seed: Optional[Any] = ..., directed: bool = ...): ... def newman_watts_strogatz_graph(n, k, p, seed: Optional[Any] = ...): ... def watts_strogatz_graph(n, k, p, seed: Optional[Any] = ...): ... -def connected_watts_strogatz_graph( - n, k, p, tries: int = ..., seed: Optional[Any] = ... -): ... +def connected_watts_strogatz_graph(n, k, p, tries: int = ..., seed: Optional[Any] = ...): ... def random_regular_graph(d, n, seed: Optional[Any] = ...): ... def barabasi_albert_graph(n, m, seed: Optional[Any] = ...): ... def extended_barabasi_albert_graph(n, m, p, q, seed: Optional[Any] = ...): ... def powerlaw_cluster_graph(n, m, p, seed: Optional[Any] = ...): ... def random_lobster(n, p1, p2, seed: Optional[Any] = ...): ... def random_shell_graph(constructor, seed: Optional[Any] = ...): ... -def random_powerlaw_tree( - n, gamma: int = ..., seed: Optional[Any] = ..., tries: int = ... -): ... +def random_powerlaw_tree(n, gamma: int = ..., seed: Optional[Any] = ..., tries: int = ...): ... def random_powerlaw_tree_sequence( n, gamma: int = ..., seed: Optional[Any] = ..., tries: int = ... ): ... 
diff --git a/mypy-stubs/networkx/linalg/laplacianmatrix.pyi b/mypy-stubs/networkx/linalg/laplacianmatrix.pyi index 70b758f3b..8068723b8 100644 --- a/mypy-stubs/networkx/linalg/laplacianmatrix.pyi +++ b/mypy-stubs/networkx/linalg/laplacianmatrix.pyi @@ -5,9 +5,7 @@ from typing import Any, Optional def laplacian_matrix(G, nodelist: Optional[Any] = ..., weight: str = ...): ... -def normalized_laplacian_matrix( - G, nodelist: Optional[Any] = ..., weight: str = ... -): ... +def normalized_laplacian_matrix(G, nodelist: Optional[Any] = ..., weight: str = ...): ... def directed_laplacian_matrix( G, nodelist: Optional[Any] = ..., diff --git a/mypy-stubs/networkx/linalg/modularitymatrix.pyi b/mypy-stubs/networkx/linalg/modularitymatrix.pyi index 5e91a0993..05b07df19 100644 --- a/mypy-stubs/networkx/linalg/modularitymatrix.pyi +++ b/mypy-stubs/networkx/linalg/modularitymatrix.pyi @@ -4,9 +4,5 @@ from typing import Any, Optional -def modularity_matrix( - G, nodelist: Optional[Any] = ..., weight: Optional[Any] = ... -): ... -def directed_modularity_matrix( - G, nodelist: Optional[Any] = ..., weight: Optional[Any] = ... -): ... +def modularity_matrix(G, nodelist: Optional[Any] = ..., weight: Optional[Any] = ...): ... +def directed_modularity_matrix(G, nodelist: Optional[Any] = ..., weight: Optional[Any] = ...): ... diff --git a/mypy-stubs/networkx/readwrite/adjlist.pyi b/mypy-stubs/networkx/readwrite/adjlist.pyi index eb9cb374e..d15d738f7 100644 --- a/mypy-stubs/networkx/readwrite/adjlist.pyi +++ b/mypy-stubs/networkx/readwrite/adjlist.pyi @@ -5,9 +5,7 @@ from typing import Any, Optional def generate_adjlist(G, delimiter: str = ...): ... -def write_adjlist( - G, path, comments: str = ..., delimiter: str = ..., encoding: str = ... -): ... +def write_adjlist(G, path, comments: str = ..., delimiter: str = ..., encoding: str = ...): ... def parse_adjlist( lines, comments: str = ..., diff --git a/mypy-stubs/networkx/readwrite/gexf.pyi b/mypy-stubs/networkx/readwrite/gexf.pyi index ee02cb295..e4e0ad61e 100644 --- a/mypy-stubs/networkx/readwrite/gexf.pyi +++ b/mypy-stubs/networkx/readwrite/gexf.pyi @@ -4,15 +4,9 @@ from typing import Any, Optional -def write_gexf( - G, path, encoding: str = ..., prettyprint: bool = ..., version: str = ... -): ... -def generate_gexf( - G, encoding: str = ..., prettyprint: bool = ..., version: str = ... -): ... -def read_gexf( - path, node_type: Optional[Any] = ..., relabel: bool = ..., version: str = ... -): ... +def write_gexf(G, path, encoding: str = ..., prettyprint: bool = ..., version: str = ...): ... +def generate_gexf(G, encoding: str = ..., prettyprint: bool = ..., version: str = ...): ... +def read_gexf(path, node_type: Optional[Any] = ..., relabel: bool = ..., version: str = ...): ... class GEXF: versions: Any = ... diff --git a/mypy-stubs/networkx/readwrite/json_graph/adjacency.pyi b/mypy-stubs/networkx/readwrite/json_graph/adjacency.pyi index 82a05102e..c540eb8a3 100644 --- a/mypy-stubs/networkx/readwrite/json_graph/adjacency.pyi +++ b/mypy-stubs/networkx/readwrite/json_graph/adjacency.pyi @@ -5,6 +5,4 @@ from typing import Any def adjacency_data(G, attrs: Any = ...): ... -def adjacency_graph( - data, directed: bool = ..., multigraph: bool = ..., attrs: Any = ... -): ... +def adjacency_graph(data, directed: bool = ..., multigraph: bool = ..., attrs: Any = ...): ... 
diff --git a/mypy-stubs/networkx/readwrite/nx_shp.pyi b/mypy-stubs/networkx/readwrite/nx_shp.pyi index a9109b51c..c2d9deb67 100644 --- a/mypy-stubs/networkx/readwrite/nx_shp.pyi +++ b/mypy-stubs/networkx/readwrite/nx_shp.pyi @@ -2,7 +2,5 @@ # # NOTE: This dynamically typed stub was automatically generated by stubgen. -def read_shp( - path, simplify: bool = ..., geom_attrs: bool = ..., strict: bool = ... -): ... +def read_shp(path, simplify: bool = ..., geom_attrs: bool = ..., strict: bool = ...): ... def write_shp(G, outdir): ... diff --git a/mypy-stubs/networkx/utils/random_sequence.pyi b/mypy-stubs/networkx/utils/random_sequence.pyi index 7ff56e9ee..b90e8219b 100644 --- a/mypy-stubs/networkx/utils/random_sequence.pyi +++ b/mypy-stubs/networkx/utils/random_sequence.pyi @@ -7,8 +7,6 @@ from typing import Any, Optional def powerlaw_sequence(n, exponent: float = ...): ... def zipf_rv(alpha, xmin: int = ..., seed: Optional[Any] = ...): ... def cumulative_distribution(distribution): ... -def discrete_sequence( - n, distribution: Optional[Any] = ..., cdistribution: Optional[Any] = ... -): ... +def discrete_sequence(n, distribution: Optional[Any] = ..., cdistribution: Optional[Any] = ...): ... def random_weighted_sample(mapping, k): ... def weighted_choice(mapping): ... diff --git a/mypy-stubs/prov/constants.py b/mypy-stubs/prov/constants.py index f711d00dd..314224a63 100644 --- a/mypy-stubs/prov/constants.py +++ b/mypy-stubs/prov/constants.py @@ -170,9 +170,7 @@ PROV_ATTRIBUTES = PROV_ATTRIBUTE_QNAMES | PROV_ATTRIBUTE_LITERALS PROV_RECORD_ATTRIBUTES = list((attr, str(attr)) for attr in PROV_ATTRIBUTES) -PROV_RECORD_IDS_MAP = dict( - (PROV_N_MAP[rec_type_id], rec_type_id) for rec_type_id in PROV_N_MAP -) +PROV_RECORD_IDS_MAP = dict((PROV_N_MAP[rec_type_id], rec_type_id) for rec_type_id in PROV_N_MAP) PROV_ID_ATTRIBUTES_MAP = dict( (prov_id, attribute) for (prov_id, attribute) in PROV_RECORD_ATTRIBUTES ) diff --git a/mypy-stubs/prov/model.pyi b/mypy-stubs/prov/model.pyi index 57ac5cb1e..cb1ba76db 100644 --- a/mypy-stubs/prov/model.pyi +++ b/mypy-stubs/prov/model.pyi @@ -228,9 +228,7 @@ class ProvBundle: | Tuple[type | type[int | str] | Tuple[Any, ...], ...] | None = ..., ) -> List[ProvRecord]: ... - def get_record( - self, identifier: Identifier | None - ) -> ProvRecord | List[ProvRecord] | None: ... + def get_record(self, identifier: Identifier | None) -> ProvRecord | List[ProvRecord] | None: ... def is_document(self) -> bool: ... def is_bundle(self) -> bool: ... def has_bundles(self) -> bool: ... @@ -400,9 +398,7 @@ class ProvBundle: identifier: str, other_attributes: _attributes_type | None, ) -> ProvRecord: ... - def membership( - self, collection: ProvRecord, entity: ProvEntity | str - ) -> ProvRecord: ... + def membership(self, collection: ProvRecord, entity: ProvEntity | str) -> ProvRecord: ... def plot( self, filename: str | None = ..., @@ -444,9 +440,7 @@ class ProvDocument(ProvBundle): def flattened(self) -> ProvDocument: ... def unified(self) -> ProvDocument: ... def update(self, other: ProvDocument | ProvBundle) -> None: ... - def add_bundle( - self, bundle: ProvBundle, identifier: Incomplete | None = ... - ) -> None: ... + def add_bundle(self, bundle: ProvBundle, identifier: Incomplete | None = ...) -> None: ... def bundle(self, identifier: Identifier) -> ProvBundle: ... 
def serialize( self, destination: IO[Any] | None = ..., format: str = ..., **args: Any @@ -456,7 +450,7 @@ class ProvDocument(ProvBundle): source: IO[Any] | str | None = ..., content: str | None = ..., format: str = ..., - **args: Any + **args: Any, ) -> ProvDocument: ... def sorted_attributes(element: ProvElement, attributes: List[str]) -> List[str]: ... diff --git a/mypy-stubs/pydot.pyi b/mypy-stubs/pydot.pyi index 93464c1d4..bd0ab3147 100644 --- a/mypy-stubs/pydot.pyi +++ b/mypy-stubs/pydot.pyi @@ -33,9 +33,7 @@ class InvocationException(Exception): class Node(Common): obj_dict: Any - def __init__( - self, name: str = ..., obj_dict: Any | None = ..., **attrs: str - ) -> None: ... + def __init__(self, name: str = ..., obj_dict: Any | None = ..., **attrs: str) -> None: ... def set_name(self, node_name: str) -> None: ... def get_name(self) -> str: ... def get_port(self) -> str: ... @@ -49,7 +47,7 @@ class Edge(Common): src: str = ..., dst: str = ..., obj_dict: Any | None = ..., - **attrs: Dict[str, str] + **attrs: Dict[str, str], ) -> None: ... def get_source(self) -> str: ... def get_destination(self) -> str: ... @@ -68,7 +66,7 @@ class Graph(Common): strict: bool = ..., suppress_disconnected: bool = ..., simplify: bool = ..., - **attrs: Dict[str, str] + **attrs: Dict[str, str], ) -> None: ... def get_graph_type(self) -> str: ... def get_top_graph_type(self) -> str: ... @@ -120,7 +118,7 @@ class Subgraph(Graph): obj_dict: Any | Dict[str, str] = ..., suppress_disconnected: bool = ..., simplify: bool = ..., - **attrs: Dict[str, str] + **attrs: Dict[str, str], ) -> None: ... class Cluster(Graph): @@ -130,7 +128,7 @@ class Cluster(Graph): obj_dict: Any | Dict[str, str] = ..., suppress_disconnected: bool = ..., simplify: bool = ..., - **attrs: Dict[str, str] + **attrs: Dict[str, str], ) -> None: ... class Dot(Graph): diff --git a/mypy-stubs/rdflib/graph.pyi b/mypy-stubs/rdflib/graph.pyi index 23c2e6e1f..d3e6f2f54 100644 --- a/mypy-stubs/rdflib/graph.pyi +++ b/mypy-stubs/rdflib/graph.pyi @@ -172,9 +172,7 @@ class Graph(Node): data: Optional[Any] = ..., **args: Any, ) -> "Graph": ... - def load( - self, source: Any, publicID: Optional[Any] = ..., format: str = ... - ) -> "Graph": ... + def load(self, source: Any, publicID: Optional[Any] = ..., format: str = ...) -> "Graph": ... def query( self, query_object: Any, diff --git a/mypy-stubs/rdflib/query.pyi b/mypy-stubs/rdflib/query.pyi index 73f4008b3..981fe12d2 100644 --- a/mypy-stubs/rdflib/query.pyi +++ b/mypy-stubs/rdflib/query.pyi @@ -31,12 +31,12 @@ class Result: source: IO[Any] | None = ..., format: str | None = ..., content_type: str | None = ..., - **kwargs: Any + **kwargs: Any, ) -> Result: ... def serialize( self, destination: str | IO[Any] | None = ..., encoding: str = ..., format: str = ..., - **args: Any + **args: Any, ) -> Optional[bytes]: ... diff --git a/mypy-stubs/rdflib/resource.pyi b/mypy-stubs/rdflib/resource.pyi index e520dbe66..0dd3b988e 100644 --- a/mypy-stubs/rdflib/resource.pyi +++ b/mypy-stubs/rdflib/resource.pyi @@ -23,11 +23,7 @@ class Resource: def label(self) -> Any: ... def comment(self) -> Any: ... def items(self) -> Iterator[Any]: ... - def transitive_objects( - self, predicate: Node, remember: Any | None = ... - ) -> Iterator[Any]: ... - def transitive_subjects( - self, predicate: Node, remember: Any | None = ... - ) -> Iterator[Any]: ... + def transitive_objects(self, predicate: Node, remember: Any | None = ...) -> Iterator[Any]: ... + def transitive_subjects(self, predicate: Node, remember: Any | None = ...) 
-> Iterator[Any]: ... def seq(self) -> Seq | None: ... def qname(self) -> Any: ... diff --git a/mypy-stubs/subprocess.pyi b/mypy-stubs/subprocess.pyi index 880ab4736..4af9814fa 100644 --- a/mypy-stubs/subprocess.pyi +++ b/mypy-stubs/subprocess.pyi @@ -1029,9 +1029,7 @@ class Popen(Generic[AnyStr]): if sys.version_info >= (3, 7): def wait(self, timeout: float | None = ...) -> int: ... else: - def wait( - self, timeout: float | None = ..., endtime: float | None = ... - ) -> int: ... + def wait(self, timeout: float | None = ..., endtime: float | None = ...) -> int: ... # Return str/bytes def communicate( self, diff --git a/mypy-stubs/urllib/parse.pyi b/mypy-stubs/urllib/parse.pyi index 3e14b3ef6..81b2db34e 100644 --- a/mypy-stubs/urllib/parse.pyi +++ b/mypy-stubs/urllib/parse.pyi @@ -124,16 +124,12 @@ def parse_qsl( errors: str = ..., ) -> List[Tuple[AnyStr, AnyStr]]: ... @overload -def quote( - string: str, safe: _Str = ..., encoding: str = ..., errors: str = ... -) -> str: ... +def quote(string: str, safe: _Str = ..., encoding: str = ..., errors: str = ...) -> str: ... @overload def quote(string: bytes, safe: _Str = ...) -> str: ... def quote_from_bytes(bs: bytes, safe: _Str = ...) -> str: ... @overload -def quote_plus( - string: str, safe: _Str = ..., encoding: str = ..., errors: str = ... -) -> str: ... +def quote_plus(string: str, safe: _Str = ..., encoding: str = ..., errors: str = ...) -> str: ... @overload def quote_plus(string: bytes, safe: _Str = ...) -> str: ... def unquote(string: str, encoding: str = ..., errors: str = ...) -> str: ... @@ -177,25 +173,15 @@ def urljoin( base: Optional[AnyStr], url: Optional[AnyStr], allow_fragments: bool = ... ) -> AnyStr: ... @overload -def urlparse( - url: str, scheme: str = ..., allow_fragments: bool = ... -) -> ParseResult: ... +def urlparse(url: str, scheme: str = ..., allow_fragments: bool = ...) -> ParseResult: ... @overload -def urlparse( - url: bytes, scheme: bytes = ..., allow_fragments: bool = ... -) -> ParseResultBytes: ... +def urlparse(url: bytes, scheme: bytes = ..., allow_fragments: bool = ...) -> ParseResultBytes: ... @overload -def urlsplit( - url: Optional[str], scheme: str = ..., allow_fragments: bool = ... -) -> SplitResult: ... +def urlsplit(url: Optional[str], scheme: str = ..., allow_fragments: bool = ...) -> SplitResult: ... @overload -def urlsplit( - url: bytes, scheme: bytes = ..., allow_fragments: bool = ... -) -> SplitResultBytes: ... +def urlsplit(url: bytes, scheme: bytes = ..., allow_fragments: bool = ...) -> SplitResultBytes: ... @overload -def urlunparse( - components: Tuple[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr, AnyStr] -) -> AnyStr: ... +def urlunparse(components: Tuple[AnyStr, AnyStr, AnyStr, AnyStr, AnyStr, AnyStr]) -> AnyStr: ... @overload def urlunparse(components: Sequence[AnyStr]) -> AnyStr: ... 
@overload diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..fcffb3983 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,18 @@ +[build-system] +requires = [ + "setuptools>=45", + 'mypy==0.971; python_version == "3.6"', # last version for Python 3.6 + 'mypy==0.991; python_version >= "3.7"', + "types-pkg_resources", + "types-requests", + "types-psutil", + "ruamel.yaml>=0.16.0,<0.17.22", + "schema-salad>=8.2.20211104054942,<9", + "cwl-utils >=0.19", + 'toml', +] +build-backend = "setuptools.build_meta" + +[tool.black] +line-length = 100 +target-version = [ "py36" ] diff --git a/tests/test_content_type.py b/tests/test_content_type.py index 9f11880c7..0c3662f0d 100644 --- a/tests/test_content_type.py +++ b/tests/test_content_type.py @@ -13,10 +13,7 @@ def test_content_types(caplog: LogCaptureFixture) -> None: found = False for record in caplog.records: - if ( - record.name == "salad" - and "got content-type of 'text/html'" in record.message - ): + if record.name == "salad" and "got content-type of 'text/html'" in record.message: found = True break assert found diff --git a/tests/test_cuda.py b/tests/test_cuda.py index bbd9672df..68fc62062 100644 --- a/tests/test_cuda.py +++ b/tests/test_cuda.py @@ -23,9 +23,7 @@ @needs_docker -@pytest.mark.skipif( - cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected" -) +@pytest.mark.skipif(cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected") def test_cuda_docker() -> None: params = [ "--enable-ext", @@ -35,9 +33,7 @@ def test_cuda_docker() -> None: @needs_singularity_3_or_newer -@pytest.mark.skipif( - cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected" -) +@pytest.mark.skipif(cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected") def test_cuda_singularity() -> None: params = [ "--enable-ext", @@ -47,9 +43,7 @@ def test_cuda_singularity() -> None: assert main(params) == 0 -@pytest.mark.skipif( - cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected" -) +@pytest.mark.skipif(cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected") def test_cuda_no_container() -> None: params = [ "--enable-ext", @@ -58,9 +52,7 @@ def test_cuda_no_container() -> None: assert main(params) == 0 -@pytest.mark.skipif( - cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected" -) +@pytest.mark.skipif(cuda_version[0] == "", reason="nvidia-smi required for CUDA not detected") def test_cuda_cc_list() -> None: params = [ "--enable-ext", @@ -100,7 +92,6 @@ def _makebuilder(cudaReq: CWLObjectType) -> Builder: @mock.patch("subprocess.check_output") @mock.patch("os.makedirs") def test_cuda_job_setup_check(makedirs: MagicMock, check_output: MagicMock) -> None: - runtime_context = RuntimeContext({}) cudaReq: CWLObjectType = { @@ -124,7 +115,6 @@ def test_cuda_job_setup_check(makedirs: MagicMock, check_output: MagicMock) -> N @mock.patch("subprocess.check_output") @mock.patch("os.makedirs") def test_cuda_job_setup_check_err(makedirs: MagicMock, check_output: MagicMock) -> None: - runtime_context = RuntimeContext({}) cudaReq: CWLObjectType = { diff --git a/tests/test_default_path.py b/tests/test_default_path.py index 005bcf0a2..a87e30331 100644 --- a/tests/test_default_path.py +++ b/tests/test_default_path.py @@ -5,12 +5,8 @@ def test_default_path() -> None: """Error is not raised when default path is not present.""" - loadingContext, workflowobj, uri = fetch_document( - get_data("tests/wf/default_path.cwl") - ) - loadingContext, uri 
= resolve_and_validate_document( - loadingContext, workflowobj, uri - ) + loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/default_path.cwl")) + loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri) loader = loadingContext.loader assert loader processobj = loader.resolve_ref(uri)[0] diff --git a/tests/test_dependencies.py b/tests/test_dependencies.py index 82171aa72..d91574a29 100644 --- a/tests/test_dependencies.py +++ b/tests/test_dependencies.py @@ -38,8 +38,7 @@ def test_biocontainers_resolution(tmp_path: Path) -> None: """Confirm expected container name for --beta-use-biocontainers.""" tool = load_tool(get_data("tests/seqtk_seq.cwl"), LoadingContext()) assert ( - get_container_from_software_requirements(True, tool) - == "quay.io/biocontainers/seqtk:r93--0" + get_container_from_software_requirements(True, tool) == "quay.io/biocontainers/seqtk:r93--0" ) @@ -60,9 +59,7 @@ def test_modules(monkeypatch: pytest.MonkeyPatch) -> None: """Do a basic smoke test using environment modules to satisfy a SoftwareRequirement.""" wflow = get_data("tests/random_lines.cwl") job = get_data("tests/random_lines_job.json") - monkeypatch.setenv( - "MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles") - ) + monkeypatch.setenv("MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles")) error_code, _, stderr = get_main_output( [ "--beta-dependency-resolvers-configuration", @@ -84,9 +81,7 @@ def test_modules_environment(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Do so by by running `env` as the tool and parsing its output. """ - monkeypatch.setenv( - "MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles") - ) + monkeypatch.setenv("MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles")) tool_env = get_tool_env( tmp_path, [ diff --git a/tests/test_docker_paths_with_colons.py b/tests/test_docker_paths_with_colons.py index c80a13a1d..ae56de50e 100644 --- a/tests/test_docker_paths_with_colons.py +++ b/tests/test_docker_paths_with_colons.py @@ -7,15 +7,10 @@ def test_docker_append_volume_read_only(mocker: Any) -> None: mocker.patch("os.mkdir") runtime = ["runtime"] characters = ":,\"'" - DockerCommandLineJob.append_volume( - runtime, "/source" + characters, "/target" + characters - ) + DockerCommandLineJob.append_volume(runtime, "/source" + characters, "/target" + characters) assert runtime == [ "runtime", - "--mount=type=bind," - '"source=/source:,""\'",' - '"target=/target:,""\'",' - "readonly", + "--mount=type=bind," '"source=/source:,""\'",' '"target=/target:,""\'",' "readonly", ] diff --git a/tests/test_environment.py b/tests/test_environment.py index e1db948ab..0fad65d7f 100644 --- a/tests/test_environment.py +++ b/tests/test_environment.py @@ -210,9 +210,7 @@ def test_basic(crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any) -> Non @CRT_PARAMS -def test_preserve_single( - crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any -) -> None: +def test_preserve_single(crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any) -> None: """Test that preserving a single env var works.""" tmp_prefix = str(tmp_path / "canary") extra_env = { @@ -236,9 +234,7 @@ def test_preserve_single( @CRT_PARAMS -def test_preserve_all( - crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any -) -> None: +def test_preserve_all(crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any) -> None: """Test that preserving all works.""" tmp_prefix = str(tmp_path / "canary") extra_env = { diff 
--git a/tests/test_examples.py b/tests/test_examples.py index 52c294e90..d9dd1ada8 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -195,9 +195,7 @@ def test_parameter_to_expression(pattern: str, expected: Any) -> None: ) -@pytest.mark.parametrize( - "pattern,expected,behavior", param_to_expr_interpolate_escapebehavior -) +@pytest.mark.parametrize("pattern,expected,behavior", param_to_expr_interpolate_escapebehavior) def test_parameter_to_expression_interpolate_escapebehavior( pattern: str, expected: str, behavior: int ) -> None: @@ -279,14 +277,9 @@ def test_expression_interpolate_failures(pattern: str) -> None: @pytest.mark.parametrize("pattern,expected,behavior", interpolate_escapebehavior) -def test_expression_interpolate_escapebehavior( - pattern: str, expected: str, behavior: int -) -> None: +def test_expression_interpolate_escapebehavior(pattern: str, expected: str, behavior: int) -> None: """Test escaping behavior in an interpolation context.""" - assert ( - expr.interpolate(pattern, interpolate_input, escaping_behavior=behavior) - == expected - ) + assert expr.interpolate(pattern, interpolate_input, escaping_behavior=behavior) == expected def test_factory() -> None: @@ -330,13 +323,9 @@ def test_factory_partial_scatter() -> None: result = err_info.value.out assert isinstance(result, dict) - assert ( - result["out"][0]["checksum"] == "sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e" - ) + assert result["out"][0]["checksum"] == "sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e" assert result["out"][1] is None - assert ( - result["out"][2]["checksum"] == "sha1$a3db5c13ff90a36963278c6a39e4ee3c22e2a436" - ) + assert result["out"][2]["checksum"] == "sha1$a3db5c13ff90a36963278c6a39e4ee3c22e2a436" def test_factory_partial_output() -> None: @@ -812,16 +801,12 @@ def test_compare_types_strict( ] -@pytest.mark.parametrize( - "src_type,sink_type,link_merge,value_from,expected_type", typechecks -) +@pytest.mark.parametrize("src_type,sink_type,link_merge,value_from,expected_type", typechecks) def test_typechecking( src_type: Any, sink_type: Any, link_merge: str, value_from: Any, expected_type: str ) -> None: assert ( - cwltool.checker.check_types( - src_type, sink_type, linkMerge=link_merge, valueFrom=value_from - ) + cwltool.checker.check_types(src_type, sink_type, linkMerge=link_merge, valueFrom=value_from) == expected_type ) @@ -1002,9 +987,7 @@ def test_var_spool_cwl_checker3() -> None: factory = cwltool.factory.Factory() try: factory.make(get_data("tests/portable.cwl")) - assert ( - "Non-portable reference to /var/spool/cwl detected" not in stream.getvalue() - ) + assert "Non-portable reference to /var/spool/cwl detected" not in stream.getvalue() finally: _logger.removeHandler(streamhandler) @@ -1062,12 +1045,8 @@ def test_print_dot() -> None: stdout = StringIO() assert main(["--debug", "--print-dot", cwl_path], stdout=stdout) == 0 computed_dot = pydot.graph_from_dot_data(stdout.getvalue())[0] - computed_edges = sorted( - (source, target) for source, target in computed_dot.obj_dict["edges"] - ) - expected_edges = sorted( - (source, target) for source, target in expected_dot.obj_dict["edges"] - ) + computed_edges = sorted((source, target) for source, target in computed_dot.obj_dict["edges"]) + expected_edges = sorted((source, target) for source, target in expected_dot.obj_dict["edges"]) assert computed_edges == expected_edges # print CommandLineTool @@ -1080,14 +1059,10 @@ def test_print_dot() -> None: @pytest.mark.parametrize("factor", test_factors) -def 
test_js_console_cmd_line_tool( - factor: str, caplog: pytest.LogCaptureFixture -) -> None: +def test_js_console_cmd_line_tool(factor: str, caplog: pytest.LogCaptureFixture) -> None: for test_file in ("js_output.cwl", "js_output_workflow.cwl"): commands = factor.split() - commands.extend( - ["--js-console", "--no-container", get_data("tests/wf/" + test_file)] - ) + commands.extend(["--js-console", "--no-container", get_data("tests/wf/" + test_file)]) error_code, _, _ = get_main_output(commands) logging_output = "\n".join([record.message for record in caplog.records]) assert "[log] Log message" in logging_output @@ -1115,9 +1090,7 @@ def test_cid_file_dir(tmp_path: Path, factor: str) -> None: test_file = "cache_test_workflow.cwl" with working_directory(tmp_path): commands = factor.split() - commands.extend( - ["--cidfile-dir", str(tmp_path), get_data("tests/wf/" + test_file)] - ) + commands.extend(["--cidfile-dir", str(tmp_path), get_data("tests/wf/" + test_file)]) error_code, stdout, stderr = get_main_output(commands) stderr = re.sub(r"\s\s+", " ", stderr) assert "completed success" in stderr @@ -1135,9 +1108,7 @@ def test_cid_file_dir_arg_is_file_instead_of_dir(tmp_path: Path, factor: str) -> bad_cidfile_dir = tmp_path / "cidfile-dir-actually-a-file" bad_cidfile_dir.touch() commands = factor.split() - commands.extend( - ["--cidfile-dir", str(bad_cidfile_dir), get_data("tests/wf/" + test_file)] - ) + commands.extend(["--cidfile-dir", str(bad_cidfile_dir), get_data("tests/wf/" + test_file)]) error_code, _, stderr = get_main_output(commands) stderr = re.sub(r"\s\s+", " ", stderr) assert "is not a directory, please check it first" in stderr, stderr @@ -1421,8 +1392,7 @@ def test_bad_stderr_expr_error() -> None: assert error_code == 1 stderr = re.sub(r"\s\s+", " ", stderr) assert ( - "'stderr' expression must return a string. Got 1111 for '$(inputs.file1.size)'." - in stderr + "'stderr' expression must return a string. Got 1111 for '$(inputs.file1.size)'." in stderr ) @@ -1438,8 +1408,7 @@ def test_bad_stdout_expr_error() -> None: assert error_code == 1 stderr = re.sub(r"\s\s+", " ", stderr) assert ( - "'stdout' expression must return a string. Got 1111 for '$(inputs.file1.size)'." - in stderr + "'stdout' expression must return a string. Got 1111 for '$(inputs.file1.size)'." 
in stderr ) @@ -1613,9 +1582,7 @@ def test_env_filtering(factor: str) -> None: def test_v1_0_arg_empty_prefix_separate_false() -> None: test_file = "tests/arg-empty-prefix-separate-false.cwl" - error_code, stdout, stderr = get_main_output( - ["--debug", get_data(test_file), "--echo"] - ) + error_code, stdout, stderr = get_main_output(["--debug", get_data(test_file), "--echo"]) stderr = re.sub(r"\s\s+", " ", stderr) assert "completed success" in stderr assert error_code == 0 @@ -1683,10 +1650,7 @@ def test_arguments_self() -> None: outputs = cast(Dict[str, Any], check()) assert "self_review" in outputs assert len(outputs) == 1 - assert ( - outputs["self_review"]["checksum"] - == "sha1$724ba28f4a9a1b472057ff99511ed393a45552e1" - ) + assert outputs["self_review"]["checksum"] == "sha1$724ba28f4a9a1b472057ff99511ed393a45552e1" def test_bad_timelimit_expr() -> None: @@ -1776,9 +1740,7 @@ def test_record_default_with_long() -> None: assert result["sixth"]["class"] == "File" assert result["sixth"]["basename"] == "whale.txt" assert result["sixth"]["size"] == 1111 - assert ( - result["sixth"]["checksum"] == "sha1$327fc7aedf4f6b69a42a7c8b808dc5a7aff61376" - ) + assert result["sixth"]["checksum"] == "sha1$327fc7aedf4f6b69a42a7c8b808dc5a7aff61376" def test_record_outputeval(tmp_path: Path) -> None: @@ -1790,21 +1752,12 @@ def test_record_outputeval(tmp_path: Path) -> None: assert "genome_fa" in result assert result["genome_fa"]["class"] == "File" assert result["genome_fa"]["basename"] == "GRCm38.primary_assembly.genome.fa" - assert ( - result["genome_fa"]["checksum"] - == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709" - ) + assert result["genome_fa"]["checksum"] == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709" assert result["genome_fa"]["size"] == 0 assert "annotation_gtf" in result assert result["annotation_gtf"]["class"] == "File" - assert ( - result["annotation_gtf"]["basename"] - == "gencode.vM21.primary_assembly.annotation.gtf" - ) - assert ( - result["annotation_gtf"]["checksum"] - == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709" - ) + assert result["annotation_gtf"]["basename"] == "gencode.vM21.primary_assembly.annotation.gtf" + assert result["annotation_gtf"]["checksum"] == "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709" assert result["annotation_gtf"]["size"] == 0 diff --git a/tests/test_ext.py b/tests/test_ext.py index 1eb4091e5..ffda59c5e 100644 --- a/tests/test_ext.py +++ b/tests/test_ext.py @@ -14,10 +14,7 @@ @needs_docker def test_missing_enable_ext() -> None: # Require that --enable-ext is provided. - assert ( - main([get_data("tests/wf/listing_deep.cwl"), get_data("tests/listing-job.yml")]) - != 0 - ) + assert main([get_data("tests/wf/listing_deep.cwl"), get_data("tests/listing-job.yml")]) != 0 @needs_docker @@ -67,20 +64,14 @@ def test_listing_none() -> None: @needs_docker def test_listing_v1_0() -> None: # Default behavior in 1.0 is deep expansion. 
- assert ( - main([get_data("tests/wf/listing_v1_0.cwl"), get_data("tests/listing-job.yml")]) - == 0 - ) + assert main([get_data("tests/wf/listing_v1_0.cwl"), get_data("tests/listing-job.yml")]) == 0 @pytest.mark.skip(reason="This is not the default behaviour yet") @needs_docker def test_listing_v1_1() -> None: # Default behavior in 1.1 will be no expansion - assert ( - main([get_data("tests/wf/listing_v1_1.cwl"), get_data("tests/listing-job.yml")]) - != 0 - ) + assert main([get_data("tests/wf/listing_v1_1.cwl"), get_data("tests/listing-job.yml")]) != 0 @needs_docker @@ -156,10 +147,7 @@ def test_disable_dir_overwrite_without_ext(tmp_path: Path) -> None: out = tmp_path / "outdir" tmp.mkdir() out.mkdir() - assert ( - main(["--outdir", str(out), get_data("tests/wf/updatedir.cwl"), "-r", str(tmp)]) - == 0 - ) + assert main(["--outdir", str(out), get_data("tests/wf/updatedir.cwl"), "-r", str(tmp)]) == 0 assert not os.listdir(tmp) assert os.listdir(out) @@ -226,9 +214,7 @@ def test_write_write_conflict(tmp_path: Path) -> None: with open(tmp_name, "w") as f: f.write(before_value) - assert ( - main(["--enable-ext", get_data("tests/wf/mut.cwl"), "-a", str(tmp_name)]) != 0 - ) + assert main(["--enable-ext", get_data("tests/wf/mut.cwl"), "-a", str(tmp_name)]) != 0 with open(tmp_name) as f: tmp_value = f.read() @@ -243,9 +229,7 @@ def test_read_write_conflict(tmp_path: Path) -> None: with open(tmp_name, "w") as f: f.write("1") - assert ( - main(["--enable-ext", get_data("tests/wf/mut3.cwl"), "-a", str(tmp_name)]) != 0 - ) + assert main(["--enable-ext", get_data("tests/wf/mut3.cwl"), "-a", str(tmp_name)]) != 0 @needs_docker @@ -289,9 +273,8 @@ def test_warn_large_inputs() -> None: stderr=stream, ) - assert ( - "Recursive directory listing has resulted in a large number of File" - in re.sub("\n *", " ", stream.getvalue()) + assert "Recursive directory listing has resulted in a large number of File" in re.sub( + "\n *", " ", stream.getvalue() ) finally: cwltool.process.FILE_COUNT_WARNING = was diff --git a/tests/test_fetch.py b/tests/test_fetch.py index 2c06bbfc1..e55491d90 100644 --- a/tests/test_fetch.py +++ b/tests/test_fetch.py @@ -66,9 +66,7 @@ def test_resolver(d: Any, a: str) -> str: load_tool("foo.cwl", loadingContext) - assert ( - main(["--print-pre", "--debug", "foo.cwl"], loadingContext=loadingContext) == 0 - ) + assert main(["--print-pre", "--debug", "foo.cwl"], loadingContext=loadingContext) == 0 root = Path(os.path.join(get_data(""))) diff --git a/tests/test_http_input.py b/tests/test_http_input.py index 6564c25b8..6b4d9b479 100644 --- a/tests/test_http_input.py +++ b/tests/test_http_input.py @@ -12,8 +12,9 @@ def test_http_path_mapping(tmp_path: Path) -> None: - - input_file_path = "https://raw.githubusercontent.com/common-workflow-language/cwltool/main/tests/2.fasta" + input_file_path = ( + "https://raw.githubusercontent.com/common-workflow-language/cwltool/main/tests/2.fasta" + ) base_file: List[CWLObjectType] = [ { "class": "File", diff --git a/tests/test_iwdr.py b/tests/test_iwdr.py index 568b91d0f..a36a1ded1 100644 --- a/tests/test_iwdr.py +++ b/tests/test_iwdr.py @@ -35,9 +35,7 @@ def test_passthrough_successive(tmp_path: Path) -> None: ] ) assert err_code == 0 - children = sorted( - tmp_path.glob("*") - ) # This input directory should be left pristine. + children = sorted(tmp_path.glob("*")) # This input directory should be left pristine. 
assert len(children) == 1 subdir = tmp_path / children[0] assert len(sorted(subdir.glob("*"))) == 1 diff --git a/tests/test_load_tool.py b/tests/test_load_tool.py index df8f1361a..9727d772b 100644 --- a/tests/test_load_tool.py +++ b/tests/test_load_tool.py @@ -51,9 +51,7 @@ def test_use_metadata() -> None: def test_checklink_outputSource() -> None: """Is outputSource resolved correctly independent of value of do_validate.""" - outsrc = ( - Path(get_data("tests/wf/1st-workflow.cwl")).as_uri() + "#argument/classfile" - ) + outsrc = Path(get_data("tests/wf/1st-workflow.cwl")).as_uri() + "#argument/classfile" loadingContext = LoadingContext({"do_validate": True}) tool = load_tool(get_data("tests/wf/1st-workflow.cwl"), loadingContext) @@ -122,10 +120,7 @@ def test_load_graph_fragment_from_packed() -> None: # This was solved by making a shallow copy of the metadata # dict to ensure that the updater did not modify the original # document. - uri2 = ( - Path(get_data("tests/wf/packed-with-loadlisting.cwl")).as_uri() - + "#16169-step.cwl" - ) + uri2 = Path(get_data("tests/wf/packed-with-loadlisting.cwl")).as_uri() + "#16169-step.cwl" load_tool(uri2, loadingContext) finally: diff --git a/tests/test_misc_cli.py b/tests/test_misc_cli.py index 2f6ae4347..307153e16 100644 --- a/tests/test_misc_cli.py +++ b/tests/test_misc_cli.py @@ -28,18 +28,14 @@ def test_empty_cmdling() -> None: def test_tool_help() -> None: """Test --tool-help.""" - return_code, stdout, stderr = get_main_output( - ["--tool-help", get_data("tests/echo.cwl")] - ) + return_code, stdout, stderr = get_main_output(["--tool-help", get_data("tests/echo.cwl")]) assert return_code == 0 assert "job_order Job input json file" in stdout def test_basic_pack() -> None: """Basic test of --pack. See test_pack.py for detailed testing.""" - return_code, stdout, stderr = get_main_output( - ["--pack", get_data("tests/wf/revsort.cwl")] - ) + return_code, stdout, stderr = get_main_output(["--pack", get_data("tests/wf/revsort.cwl")]) assert return_code == 0 assert "$graph" in stdout @@ -80,8 +76,7 @@ def test_skip_schemas_external_step() -> None: ) assert exit_code == 0 assert ( - "Repeat node-elements inside property elements: " - "http://www.w3.org/1999/xhtmlmeta" + "Repeat node-elements inside property elements: " "http://www.w3.org/1999/xhtmlmeta" ) not in stderr assert ( "Could not load extension schema https://bad.example.com/missing.ttl: " diff --git a/tests/test_mpi.py b/tests/test_mpi.py index c9c69fe80..7322f9e30 100644 --- a/tests/test_mpi.py +++ b/tests/test_mpi.py @@ -140,8 +140,7 @@ def test_simple_mpi_tool(self, fake_mpi_conf: str, tmp_path: Path) -> None: stderr = StringIO() with working_directory(tmp_path): rc = main( - argsl=cwltool_args(fake_mpi_conf) - + [get_data("tests/wf/mpi_simple.cwl")], + argsl=cwltool_args(fake_mpi_conf) + [get_data("tests/wf/mpi_simple.cwl")], stdout=stdout, stderr=stderr, ) @@ -292,9 +291,7 @@ def schema_ext11() -> Generator[Names, None, None]: mpiReq = CommentedMap({"class": MPIRequirementName, "processes": 1}) containerReq = CommentedMap({"class": "DockerRequirement"}) -basetool = CommentedMap( - {"cwlVersion": "v1.1", "inputs": CommentedSeq(), "outputs": CommentedSeq()} -) +basetool = CommentedMap({"cwlVersion": "v1.1", "inputs": CommentedSeq(), "outputs": CommentedSeq()}) def mk_tool( diff --git a/tests/test_override.py b/tests/test_override.py index afacd3972..980c853bb 100644 --- a/tests/test_override.py +++ b/tests/test_override.py @@ -103,11 +103,7 @@ def test_overrides(parameters: List[str], result: 
Dict[str, str]) -> None: "enable support for development and deprecated versions.", ), ( - [ - get_data( - "tests/override/env-tool_cwl-requirement_override_default_wrongver.yaml" - ) - ], + [get_data("tests/override/env-tool_cwl-requirement_override_default_wrongver.yaml")], "`cwl:requirements` in the input object is not part of CWL v1.0. You can " "adjust to use `cwltool:overrides` instead; or you can set the cwlVersion to " "v1.1 or greater.", diff --git a/tests/test_pack.py b/tests/test_pack.py index 8a7cc7a72..1d38e35e8 100644 --- a/tests/test_pack.py +++ b/tests/test_pack.py @@ -44,9 +44,7 @@ def test_packing(unpacked: str, expected: str) -> None: """Compare expected version reality with various workflows and --pack.""" loadingContext, workflowobj, uri = fetch_document(get_data(unpacked)) loadingContext.do_update = False - loadingContext, uri = resolve_and_validate_document( - loadingContext, workflowobj, uri - ) + loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri) packed = json.loads(print_pack(loadingContext, uri)) context_dir = os.path.abspath(os.path.dirname(get_data(unpacked))) @@ -68,13 +66,9 @@ def test_packing(unpacked: str, expected: str) -> None: def test_pack_single_tool() -> None: - loadingContext, workflowobj, uri = fetch_document( - get_data("tests/wf/formattest.cwl") - ) + loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/formattest.cwl")) loadingContext.do_update = False - loadingContext, uri = resolve_and_validate_document( - loadingContext, workflowobj, uri - ) + loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri) loader = loadingContext.loader assert loader loader.resolve_ref(uri)[0] @@ -91,9 +85,7 @@ def test_pack_fragment() -> None: loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/scatter2.cwl")) packed = cwltool.pack.pack(loadingContext, uri + "#scatterstep/mysub") - adjustFileObjs( - packed, partial(make_relative, os.path.abspath(get_data("tests/wf"))) - ) + adjustFileObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf")))) adjustDirObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf")))) packed_result = json.dumps(packed, sort_keys=True, indent=2) @@ -105,13 +97,9 @@ def test_pack_fragment() -> None: def test_pack_rewrites() -> None: rewrites: Dict[str, str] = {} - loadingContext, workflowobj, uri = fetch_document( - get_data("tests/wf/default-wf5.cwl") - ) + loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/default-wf5.cwl")) loadingContext.do_update = False - loadingContext, uri = resolve_and_validate_document( - loadingContext, workflowobj, uri - ) + loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri) loader = loadingContext.loader assert loader loader.resolve_ref(uri)[0] @@ -137,9 +125,7 @@ def test_pack_missing_cwlVersion(cwl_path: str) -> None: # Testing single tool workflow loadingContext, workflowobj, uri = fetch_document(get_data(cwl_path)) loadingContext.do_update = False - loadingContext, uri = resolve_and_validate_document( - loadingContext, workflowobj, uri - ) + loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri) loader = loadingContext.loader assert loader loader.resolve_ref(uri)[0] @@ -163,9 +149,7 @@ def test_pack_idempotence_workflow(tmp_path: Path) -> None: def _pack_idempotently(document: str, tmp_path: Path) -> None: loadingContext, workflowobj, uri = fetch_document(get_data(document)) loadingContext.do_update 
= False - loadingContext, uri = resolve_and_validate_document( - loadingContext, workflowobj, uri - ) + loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri) loader = loadingContext.loader assert loader loader.resolve_ref(uri)[0] @@ -182,9 +166,7 @@ def _pack_idempotently(document: str, tmp_path: Path) -> None: loadingContext, workflowobj, uri2 = fetch_document(tmp.name) loadingContext.do_update = False - loadingContext, uri2 = resolve_and_validate_document( - loadingContext, workflowobj, uri2 - ) + loadingContext, uri2 = resolve_and_validate_document(loadingContext, workflowobj, uri2) loader2 = loadingContext.loader assert loader2 loader2.resolve_ref(uri2)[0] @@ -212,9 +194,7 @@ def test_packed_workflow_execution( loadingContext.resolver = tool_resolver loadingContext, workflowobj, uri = fetch_document(get_data(wf_path), loadingContext) loadingContext.do_update = False - loadingContext, uri = resolve_and_validate_document( - loadingContext, workflowobj, uri - ) + loadingContext, uri = resolve_and_validate_document(loadingContext, workflowobj, uri) loader = loadingContext.loader assert loader loader.resolve_ref(uri)[0] diff --git a/tests/test_path_checks.py b/tests/test_path_checks.py index 0e8cb1214..9344b9e85 100644 --- a/tests/test_path_checks.py +++ b/tests/test_path_checks.py @@ -167,9 +167,7 @@ def test_clt_returns_specialchar_names(tmp_path: Path) -> None: # Mock an "output" file with the above special characters in its name special = "".join(reserved) - output_schema = cast( - CWLObjectType, {"type": "File", "outputBinding": {"glob": special}} - ) + output_schema = cast(CWLObjectType, {"type": "File", "outputBinding": {"glob": special}}) mock_output = tmp_path / special mock_output.touch() diff --git a/tests/test_pathmapper.py b/tests/test_pathmapper.py index fae1cb5d8..b7cf2f6a1 100644 --- a/tests/test_pathmapper.py +++ b/tests/test_pathmapper.py @@ -65,9 +65,7 @@ def __init__( @pytest.mark.parametrize("name,file_dir,expected", normalization_parameters) -def test_normalizeFilesDirs( - name: str, file_dir: CWLObjectType, expected: CWLObjectType -) -> None: +def test_normalizeFilesDirs(name: str, file_dir: CWLObjectType, expected: CWLObjectType) -> None: normalizeFilesDirs(file_dir) assert file_dir == expected, name diff --git a/tests/test_provenance.py b/tests/test_provenance.py index 4adb5e5c1..4cc9e1c34 100644 --- a/tests/test_provenance.py +++ b/tests/test_provenance.py @@ -186,10 +186,10 @@ def test_directory_workflow(tmp_path: Path) -> None: # Input files should be captured by hash value, # even if they were inside a class: Directory - for (l, l_hash) in sha1.items(): + for letter, l_hash in sha1.items(): prefix = l_hash[:2] # first 2 letters p = folder / "data" / prefix / l_hash - assert p.is_file(), f"Could not find {l} as {p}" + assert p.is_file(), f"Could not find {letter} as {p}" @needs_docker @@ -383,9 +383,7 @@ def check_ro(base_path: Path, nested: bool = False) -> None: packed = urllib.parse.urljoin(arcp_root, "/workflow/packed.cwl") primary_job = urllib.parse.urljoin(arcp_root, "/workflow/primary-job.json") - primary_prov_nt = urllib.parse.urljoin( - arcp_root, "/metadata/provenance/primary.cwlprov.nt" - ) + primary_prov_nt = urllib.parse.urljoin(arcp_root, "/metadata/provenance/primary.cwlprov.nt") uuid = arcp.parse_arcp(arcp_root).uuid highlights = set(g.subjects(OA.motivatedBy, OA.highlighting)) diff --git a/tests/test_recursive_validation.py b/tests/test_recursive_validation.py index 9d52404c7..300c99d88 100644 --- 
a/tests/test_recursive_validation.py +++ b/tests/test_recursive_validation.py @@ -6,9 +6,7 @@ def test_recursive_validation() -> None: """Test the recursive_resolve_and_validate_document function.""" - loadingContext, workflowobj, uri = fetch_document( - get_data("tests/wf/default_path.cwl") - ) + loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/default_path.cwl")) loadingContext, uri, tool = recursive_resolve_and_validate_document( loadingContext, workflowobj, uri ) diff --git a/tests/test_singularity_versions.py b/tests/test_singularity_versions.py index 844f62a09..3f79e8266 100644 --- a/tests/test_singularity_versions.py +++ b/tests/test_singularity_versions.py @@ -38,12 +38,8 @@ def test_get_version() -> None: assert isinstance(v, tuple) assert isinstance(v[0], list) assert isinstance(v[1], str) - assert ( - cwltool.singularity._SINGULARITY_VERSION is not None - ) # pylint: disable=protected-access - assert ( - len(cwltool.singularity._SINGULARITY_FLAVOR) > 0 - ) # pylint: disable=protected-access + assert cwltool.singularity._SINGULARITY_VERSION is not None # pylint: disable=protected-access + assert len(cwltool.singularity._SINGULARITY_FLAVOR) > 0 # pylint: disable=protected-access v_cached = get_version() assert v == v_cached diff --git a/tests/test_subgraph.py b/tests/test_subgraph.py index a4ac69347..fe29b5345 100644 --- a/tests/test_subgraph.py +++ b/tests/test_subgraph.py @@ -102,10 +102,7 @@ def test_single_process_inherit_reqshints(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6" - ) + assert json.loads(stdout)["out"]["checksum"] == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6" def test_single_process_inherit_hints_collision(tmp_path: Path) -> None: @@ -121,10 +118,7 @@ def test_single_process_inherit_hints_collision(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519" - ) + assert json.loads(stdout)["out"]["checksum"] == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519" def test_single_process_inherit_reqs_collision(tmp_path: Path) -> None: @@ -140,10 +134,7 @@ def test_single_process_inherit_reqs_collision(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519" - ) + assert json.loads(stdout)["out"]["checksum"] == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519" def test_single_process_inherit_reqs_step_collision(tmp_path: Path) -> None: @@ -159,8 +150,7 @@ def test_single_process_inherit_reqs_step_collision(tmp_path: Path) -> None: ) assert err_code == 0 assert ( - json.loads(stdout)["output"]["checksum"] - == "sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e" + json.loads(stdout)["output"]["checksum"] == "sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e" ) @@ -177,10 +167,7 @@ def test_single_process_inherit_reqs_hints_collision(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519" - ) + assert json.loads(stdout)["out"]["checksum"] == "sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519" def test_single_process_inherit_only_hints(tmp_path: Path) -> None: @@ -196,10 +183,7 @@ def test_single_process_inherit_only_hints(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$ab5f2a9add5f54622dde555ac8ae9a3000e5ee0a" - 
) + assert json.loads(stdout)["out"]["checksum"] == "sha1$ab5f2a9add5f54622dde555ac8ae9a3000e5ee0a" def test_single_process_subwf_step(tmp_path: Path) -> None: @@ -215,10 +199,7 @@ def test_single_process_subwf_step(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6" - ) + assert json.loads(stdout)["out"]["checksum"] == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6" def test_single_process_packed_subwf_step(tmp_path: Path) -> None: @@ -234,10 +215,7 @@ def test_single_process_packed_subwf_step(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6" - ) + assert json.loads(stdout)["out"]["checksum"] == "sha1$cdc1e84968261d6a7575b5305945471f8be199b6" @needs_docker @@ -253,8 +231,7 @@ def test_single_process_subwf_subwf_inline_step() -> None: ) assert err_code == 0 assert ( - json.loads(stdout)["output"]["checksum"] - == "sha1$3596ea087bfdaf52380eae441077572ed289d657" + json.loads(stdout)["output"]["checksum"] == "sha1$3596ea087bfdaf52380eae441077572ed289d657" ) @@ -271,10 +248,7 @@ def test_single_step_subwf_step(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c" - ) + assert json.loads(stdout)["out"]["checksum"] == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c" def test_single_step_wfstep_long_out(tmp_path: Path) -> None: @@ -290,10 +264,7 @@ def test_single_step_wfstep_long_out(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c" - ) + assert json.loads(stdout)["out"]["checksum"] == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c" def test_single_step_packed_subwf_step(tmp_path: Path) -> None: @@ -309,10 +280,7 @@ def test_single_step_packed_subwf_step(tmp_path: Path) -> None: ] ) assert err_code == 0 - assert ( - json.loads(stdout)["out"]["checksum"] - == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c" - ) + assert json.loads(stdout)["out"]["checksum"] == "sha1$7608e5669ba454c61fab01c9b133b52a9a7de68c" @needs_docker diff --git a/tests/test_target.py b/tests/test_target.py index 400027ec8..724579736 100644 --- a/tests/test_target.py +++ b/tests/test_target.py @@ -6,9 +6,7 @@ def test_target() -> None: """Test --target option successful.""" test_file = "tests/wf/scatter-wf4.cwl" - exit_code = main( - ["--target", "out", get_data(test_file), "--inp1", "INP1", "--inp2", "INP2"] - ) + exit_code = main(["--target", "out", get_data(test_file), "--inp1", "INP1", "--inp2", "INP2"]) assert exit_code == 0 @@ -32,7 +30,5 @@ def test_wrong_target() -> None: def test_target_packed() -> None: """Test --target option with packed workflow schema.""" test_file = "tests/wf/scatter-wf4.json" - exit_code = main( - ["--target", "out", get_data(test_file), "--inp1", "INP1", "--inp2", "INP2"] - ) + exit_code = main(["--target", "out", get_data(test_file), "--inp1", "INP1", "--inp2", "INP2"]) assert exit_code == 0 diff --git a/tests/test_tmpdir.py b/tests/test_tmpdir.py index 4baacc745..b96f5e5db 100644 --- a/tests/test_tmpdir.py +++ b/tests/test_tmpdir.py @@ -109,13 +109,9 @@ def test_commandLineTool_job_tmpdir_prefix(tmp_path: Path) -> None: @needs_docker -def test_dockerfile_tmpdir_prefix( - tmp_path: Path, monkeypatch: pytest.MonkeyPatch -) -> None: +def test_dockerfile_tmpdir_prefix(tmp_path: Path, monkeypatch: 
pytest.MonkeyPatch) -> None: """Test that DockerCommandLineJob.get_image respects temp directory directives.""" - monkeypatch.setattr( - target=subprocess, name="check_call", value=lambda *args, **kwargs: True - ) + monkeypatch.setattr(target=subprocess, name="check_call", value=lambda *args, **kwargs: True) (tmp_path / "out").mkdir() tmp_outdir_prefix = tmp_path / "out" / "1" (tmp_path / "3").mkdir() @@ -220,9 +216,7 @@ def test_docker_tmpdir_prefix(tmp_path: Path) -> None: resolved=str(resolved_writable_dir), target="bar", type=None, staged=None ) (tmp_path / "2").mkdir() - job.add_writable_directory_volume( - runtime, volume_dir, None, str(tmp_path / "2" / "dir") - ) + job.add_writable_directory_volume(runtime, volume_dir, None, str(tmp_path / "2" / "dir")) children = sorted((tmp_path / "2").glob("*")) assert len(children) == 1 subdir = tmp_path / "2" / children[0] @@ -235,9 +229,7 @@ def test_docker_tmpdir_prefix(tmp_path: Path) -> None: volume_file = MapperEnt(resolved="Hoopla!", target="baz", type=None, staged=None) (tmp_path / "4").mkdir() - job.create_file_and_add_volume( - runtime, volume_file, None, None, str(tmp_path / "4" / "file") - ) + job.create_file_and_add_volume(runtime, volume_file, None, None, str(tmp_path / "4" / "file")) children = sorted((tmp_path / "4").glob("*")) assert len(children) == 1 subdir = tmp_path / "4" / children[0] diff --git a/tests/test_toolargparse.py b/tests/test_toolargparse.py index 6620e4a14..ad6626385 100644 --- a/tests/test_toolargparse.py +++ b/tests/test_toolargparse.py @@ -215,9 +215,7 @@ def test_argparser_without_doc() -> None: ), ], ) -def test_argparse_append_with_default( - job_order: List[str], expected_values: List[str] -) -> None: +def test_argparse_append_with_default(job_order: List[str], expected_values: List[str]) -> None: """ Confirm that the appended arguments must not include the default. 
@@ -225,9 +223,7 @@ def test_argparse_append_with_default( """ loadingContext = LoadingContext() tool = load_tool(get_data("tests/default_values_list.cwl"), loadingContext) - toolparser = generate_parser( - argparse.ArgumentParser(prog="test"), tool, {}, [], False - ) + toolparser = generate_parser(argparse.ArgumentParser(prog="test"), tool, {}, [], False) cmd_line = vars(toolparser.parse_args(job_order)) file_paths = list(cmd_line["file_paths"]) assert expected_values == file_paths diff --git a/tests/test_udocker.py b/tests/test_udocker.py index e2bc1b19f..eb9a0ebfe 100644 --- a/tests/test_udocker.py +++ b/tests/test_udocker.py @@ -19,7 +19,6 @@ def udocker(tmp_path_factory: TempPathFactory) -> str: test_environ = copy.copy(os.environ) docker_install_dir = str(tmp_path_factory.mktemp("udocker")) with working_directory(docker_install_dir): - url = "https://github.com/indigo-dc/udocker/releases/download/1.3.5/udocker-1.3.5.tar.gz" install_cmds = [ ["curl", "-L", url, "-o", "./udocker-tarball.tgz"], diff --git a/tests/test_validate_js.py b/tests/test_validate_js.py index d4520ee93..4c81a5576 100644 --- a/tests/test_validate_js.py +++ b/tests/test_validate_js.py @@ -69,23 +69,15 @@ def test_js_hint_reports_invalid_js() -> None: def test_js_hint_warn_on_es6() -> None: - assert ( - len(validate_js.jshint_js(code_fragment_to_js("((() => 4)())"), []).errors) == 1 - ) + assert len(validate_js.jshint_js(code_fragment_to_js("((() => 4)())"), []).errors) == 1 def test_js_hint_error_on_undefined_name() -> None: - assert ( - len(validate_js.jshint_js(code_fragment_to_js("undefined_name()")).errors) == 1 - ) + assert len(validate_js.jshint_js(code_fragment_to_js("undefined_name()")).errors) == 1 def test_js_hint_set_defined_name() -> None: assert ( - len( - validate_js.jshint_js( - code_fragment_to_js("defined_name()"), ["defined_name"] - ).errors - ) + len(validate_js.jshint_js(code_fragment_to_js("defined_name()"), ["defined_name"]).errors) == 0 ) diff --git a/tox.ini b/tox.ini index bd675b40d..6c48347ff 100644 --- a/tox.ini +++ b/tox.ini @@ -55,7 +55,7 @@ deps = py311-pydocstyle: diff-cover py311-lintreadme: twine py311-lintreadme: build - py311-lintreadme: readme_renderer[md] + py311-lintreadme: readme_renderer[rst] setenv = py3{6,7,8,9,10,11}-unit: LC_ALL = C.UTF-8