# Source code for macsyfinder.scripts.msf

#########################################################################
# MacSyFinder - Detection of macromolecular systems in protein dataset  #
#               using systems modelling and similarity search.          #
# Authors: Sophie Abby, Bertrand Neron                                  #
# Copyright (c) 2014-2025  Institut Pasteur (Paris) and CNRS.           #
# See the COPYRIGHT file for details                                    #
#                                                                       #
# This file is part of MacSyFinder package.                             #
#                                                                       #
# MacSyFinder is free software: you can redistribute it and/or modify   #
# it under the terms of the GNU General Public License as published by  #
# the Free Software Foundation, either version 3 of the License, or     #
# (at your option) any later version.                                   #
#                                                                       #
# MacSyFinder is distributed in the hope that it will be useful,        #
# but WITHOUT ANY WARRANTY; without even the implied warranty of        #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          #
# GNU General Public License for more details .                         #
#                                                                       #
# You should have received a copy of the GNU General Public License     #
# along with MacSyFinder (COPYING).                                     #
# If not, see <https://www.gnu.org/licenses/>.                          #
#########################################################################

"""
Main entrypoint to macsyfinder
"""
import shutil

import sys
import os
import argparse
import logging
import itertools
import signal
import time
import typing
from textwrap import dedent

import colorlog

import macsylib
from macsylib.config import MacsyDefaults, Config
from macsylib.io import unlikely_systems_to_txt, likely_systems_to_tsv, likely_systems_to_txt, summary_best_solution, \
    multisystems_to_tsv, loners_to_tsv, systems_to_tsv, solutions_to_tsv, rejected_candidates_to_tsv, \
    rejected_candidates_to_txt, systems_to_txt
from macsylib.registries import ModelRegistry, scan_models_dir
from macsylib.error import OptionError, Timeout, EmptyFileError
from macsylib.search_systems import search_systems
from macsylib.system import HitSystemTracker
from macsylib.utils import get_def_to_detect, get_replicon_names, parse_time
from macsylib.solution import find_best_solutions

from macsyfinder import get_version_message
from macsyfinder.io import outfile_header
_log = colorlog.getLogger('macsylib')


def alarm_handler(signum: signal.Signals, frame) -> None:
    """
    SIGALRM handler: log that the timeout expired, flush every log handler,
    then abort the running computation by raising :class:`Timeout`.

    :param signum: the signal number that triggered this handler
    :param frame: the current stack frame (unused)
    :raise: Timeout
    """
    _log.critical("Timeout is over. Aborting")
    for handler in _log.handlers:
        handler.flush()
    # Deliberately raise instead of sys.exit(0): exiting with 0 would let
    # parallel_msf retry the job on another machine, which we don't want.
    # sys.exit(0)
    raise Timeout()
def list_models(args: argparse.Namespace) -> str:
    """
    Build a printable listing of the models installed on this machine.

    :param args: The command line argument once parsed
    :return: a string representation of all models and submodels installed.
    """
    defaults = MacsyDefaults(package_name='macsyfinder', tool_name='macsyfinder')
    config = Config(defaults, args)
    registry = ModelRegistry()
    # Scan every configured models directory; unreadable ones are skipped
    # with a warning instead of aborting the listing.
    for model_dir in config.models_dir():
        try:
            locations = scan_models_dir(model_dir, profile_suffix=config.profile_suffix())
        except PermissionError as err:
            _log.warning(f"{model_dir} is not readable: {err} : skip it.")
            continue
        for model_loc in locations:
            registry.add(model_loc)
    return str(registry)
def parse_args(args: list[str]) -> tuple[argparse.ArgumentParser, argparse.Namespace]:
    """
    Build the macsyfinder command-line parser and parse *args* with it.

    :param args: The arguments provided on the command line
    :return: The parser and the arguments parsed
    """
    parser = argparse.ArgumentParser(
        epilog="For more details, visit the MacSyFinder website and see the MacSyFinder documentation.",
        formatter_class=argparse.RawTextHelpFormatter,
        description=dedent(r'''
         *            *               *                   * *       *
             __  __            *     ____        *        *
         *  |  \/  | __ _  ___      / ___| _   _     ___ _         _
            | |\/| |/ _` |/ __|     \___ \| | | |   | __(_)_ _  __| |___ _ _
            | |  | | (_| | (__   *   ___) | |_| |   | _|| | ' \/ _` / -_) '_|
            |_|  |_|\__,_|\___|     |____/ \__, |   |_| |_|_||_\__,_\___|_|
              *       *        *     *     |___/  *        *          *

        MacSyFinder (MSF) - Detection of macromolecular systems
        in protein datasets using systems modelling and similarity search.
        '''))

    msf_def = MacsyDefaults(package_name='macsyfinder', tool_name='macsyfinder')

    parser.add_argument("-m", "--models",
                        nargs='*',
                        default=None,
                        help="""The models to search.
The first element must be the name of family models, followed by the name of the models to search.
If the name 'all' is in the list of models, all models from the family will be searched.
'--models TXSS Flagellum T2SS' means MSF will search for the models TXSS/Flagellum and TXSS/T2SS
'--models TXSS all' means MSF will search for all models found in the model package TXSS
'--models CRISPRcas/subtyping all' means MSF will search for all models described in the CRISPRCas/subtyping subfamily.
(required unless --previous-run is set)
""")

    genome_options = parser.add_argument_group(title="Input dataset options")
    genome_options.add_argument("--sequence-db",
                                action='store',
                                default=None,
                                help="""Path to the sequence dataset in fasta format (gzip files are supported).
(required unless --previous-run is set)
""")
    genome_options.add_argument("--db-type",
                                choices=['ordered_replicon', 'gembase', 'unordered'],
                                default=None,
                                help='''The type of dataset to deal with.
"unordered" corresponds to a non-assembled genome or set of unassembled genes,
"ordered_replicon" to an assembled genome,
"gembase" to a set of replicons where sequence identifiers follow this convention: ">RepliconName_SequenceID".
(required unless --previous-run is set)
''')
    genome_options.add_argument("--replicon-topology",
                                choices=['linear', 'circular'],
                                default=None,
                                help=f"""The topology of the replicons
(this option is meaningful only if the db_type is 'ordered_replicon' or 'gembase'.)
(default: {msf_def['replicon_topology']})
""")
    genome_options.add_argument("--topology-file",
                                default=None,
                                help="""Topology file path. The topology file allows to specify a topology
(linear or circular) for each replicon
(this option is meaningful only if the db_type is 'ordered_replicon' or 'gembase'.
A topology file is a tabular file with two columns:
the 1st is the replicon name, and the 2nd the corresponding topology:
\"RepliconA\tlinear\"
""")
    genome_options.add_argument("--idx",
                                action='store_true',
                                default=False,
                                help=f"""Forces to build the indexes for the sequence dataset even
if they were previously computed and present at the dataset location.
(default: {msf_def['idx']})""")

    system_options = parser.add_argument_group(title="Systems detection options")
    system_options.add_argument("--inter-gene-max-space",
                                action='append',
                                nargs=2,
                                default=None,
                                help="""Co-localization criterion: maximum number of components non-matched by a profile
allowed between two matched components for them to be considered contiguous.
Option only meaningful for 'ordered' datasets.
The first value must name a model, the second a number of components.
This option can be repeated several times:
"--inter-gene-max-space TXSS/T2SS 12 --inter-gene-max-space TXSS/Flagellum 20"
""")
    system_options.add_argument("--min-mandatory-genes-required",
                                action='append',
                                nargs=2,
                                default=None,
                                help="""The minimal number of mandatory genes required for model assessment.
The first value must correspond to a model fully qualified name, the second value to an integer.
This option can be repeated several times:
"--min-mandatory-genes-required TXSS/T2SS 15 --min-mandatory-genes-required TXSS/Flagellum 10"
""")
    system_options.add_argument("--min-genes-required",
                                action='append',
                                nargs=2,
                                default=None,
                                help="""The minimal number of genes required for model assessment
(includes both 'mandatory' and 'accessory' components).
The first value must correspond to a model fully qualified name, the second value to an integer.
This option can be repeated several times:
"--min-genes-required TXSS/T2SS 15 --min-genes-required TXSS/Flagellum 10"
""")
    system_options.add_argument("--max-nb-genes",
                                action='append',
                                nargs=2,
                                default=None,
                                help="""The maximal number of genes to consider a system as full.
The first value must correspond to a model name, the second value to an integer.
This option can be repeated several times:
"--max-nb-genes TXSS/T2SS 5 --max-nb-genes TXSS/Flagellum 10"
""")
    system_options.add_argument("--multi-loci",
                                action='store',
                                default=None,
                                help="""Specifies if the system can be detected as a 'scattered' (or multiple-loci-encoded) system.
The models are specified as a comma separated list of fully qualified name(s)
"--multi-loci model_familyA/model_1,model_familyB/model_2"
""")

    hmmer_options = parser.add_argument_group(title="Options for Hmmer execution and hits filtering")
    hmmer_options.add_argument('--hmmer',
                               action='store',
                               default=None,
                               help=f"""Path to the hmmsearch program.
If not specified, rely on the environment variable PATH
(default: {msf_def['hmmer']})""")
    hmmer_options.add_argument('--e-value-search',
                               action='store',
                               type=float,
                               default=None,
                               help=f"""Maximal e-value for hits to be reported during hmmsearch search.
By default MSF set per profile threshold for hmmsearch run (hmmsearch --cut_ga option)
for profiles containing the GA bit score threshold.
If a profile does not contain the GA bit score the --e-value-search (-E in hmmsearch)
is applied to this profile.
To apply the --e-value-search to all profiles use the --no-cut-ga option.
(default: {msf_def['e_value_search']})
""")
    # --cut-ga / --no-cut-ga are exclusive ways to control the per-profile
    # GA bit-score thresholding behavior.
    cut_ga_group = hmmer_options.add_mutually_exclusive_group()
    cut_ga_group.add_argument('--no-cut-ga',
                              action='store_true',
                              default=None,
                              help=f"""By default the MSF tries to apply a threshold per profile by using the
hmmer --cut_ga option. This is possible only if the GA bit score is present in the profile,
otherwise MSF switches to use the --e-value-search (-E in hmmsearch).
If this option is set the --e-value-search option is used for all profiles
regardless of the presence of a GA bit score in the profiles.
(default: {not msf_def['cut_ga']})""")
    cut_ga_group.add_argument('--cut-ga',
                              action='store_true',
                              default=None,
                              help=f"""By default the MSF tries to apply a threshold per profile by using the
hmmer --cut_ga option. This is possible only if the GA bit score is present in the profile,
otherwise MSF switches to use the --e-value-search (-E in hmmsearch).
But the modeler can override this default behavior to not use cut_ga
but --e-value-search instead (-E in hmmsearch).
The user can reestablish the general MSF behavior;
be sure the profiles contain the GA bit score.
(default: {msf_def['cut_ga']})""")
    hmmer_options.add_argument('--i-evalue-sel',
                               action='store',
                               type=float,
                               default=None,
                               help=f"""Maximal independent e-value for Hmmer hits to be selected for systems detection.
(default:{msf_def['i_evalue_sel']})""")
    hmmer_options.add_argument('--coverage-profile',
                               action='store',
                               type=float,
                               default=None,
                               help=f"""Minimal profile coverage required for the hit alignment with the profile
to allow the hit selection for systems detection.
(default: {msf_def['coverage_profile']})""")

    score_options = parser.add_argument_group(title="Score options",
                                              description="Options for cluster and systems scoring")
    score_options.add_argument('--mandatory-weight',
                               action='store',
                               type=float,
                               default=None,
                               help=f"""the weight of a mandatory component in cluster scoring
(default:{msf_def['mandatory_weight']})""")
    score_options.add_argument('--accessory-weight',
                               action='store',
                               type=float,
                               default=None,
                               help=f"""the weight of an accessory component in cluster scoring
(default:{msf_def['accessory_weight']})""")
    # hidden: the weight of a neutral component in cluster scoring
    # (default: msf_def['neutral_weight'])
    score_options.add_argument('--neutral-weight',
                               action='store',
                               type=float,
                               default=None,
                               help=argparse.SUPPRESS)
    # hidden: the weight modifier for a component which codes for itself in cluster scoring
    # (default: msf_def['itself_weight'])
    score_options.add_argument('--itself-weight',
                               action='store',
                               type=float,
                               default=None,
                               help=argparse.SUPPRESS)
    score_options.add_argument('--exchangeable-weight',
                               action='store',
                               type=float,
                               default=None,
                               help=f"""the weight modifier for a component which code for exchangeable cluster scoring
(default:{msf_def['exchangeable_weight']})""")
    score_options.add_argument('--redundancy-penalty',
                               action='store',
                               type=float,
                               default=None,
                               help=f"""the weight modifier for cluster which bring a component already presents
in other clusters
(default:{msf_def['redundancy_penalty']})""")
    score_options.add_argument('--out-of-cluster',
                               action='store',
                               type=float,
                               default=None,
                               help=f"""the weight modifier for a hit which is
- a true loner (not in cluster)
- or multi-system (from an other system)
(default:{msf_def['out_of_cluster_weight']})""")

    dir_options = parser.add_argument_group(title="Path options", description=None)
    dir_options.add_argument('--models-dir',
                             action='store',
                             default=None,
                             help="""Specifies the path to the models if the models are not installed
in the canonical place. It gathers definitions (xml files) and HMM profiles
arranged in a specific file structure. A directory with the name of the model with at least two directories
'profiles' - which contains HMM profiles for each gene components described in the systems' models
'models' - which contains either the XML files of models' definitions
or subdirectories to organize the models in subsystems.""")
    dir_options.add_argument('-o', '--out-dir',
                             action='store',
                             default=None,
                             help="""Path to the directory where to store output results.
if out-dir is specified, res-search-dir will be ignored.""")
    dir_options.add_argument('--force',
                             dest='force_run',
                             action='store_true',
                             default=None,
                             help="""force to run even the out dir already exists and is not empty.
Use this option with caution, MSF will erase everything in out dir before to run.""")
    dir_options.add_argument('--index-dir',
                             action='store',
                             default=None,
                             help="Specifies the path to a directory to store/read the sequence index "
                                  "when the sequence-db dir is not writable.")
    dir_options.add_argument('--res-search-suffix',
                             action='store',
                             default=None,
                             help="The suffix to give to Hmmer raw output files. "
                                  f"(default: {msf_def['res_search_suffix']})")
    dir_options.add_argument('--res-extract-suffix',
                             action='store',
                             default=None,
                             help="The suffix to give to filtered hits output files. "
                                  f"(default: {msf_def['res_extract_suffix']})")
    dir_options.add_argument('--profile-suffix',
                             action='store',
                             default=None,
                             help=f"""The suffix of profile files. For each 'Gene' element, the corresponding
profile is searched in the 'profile_dir', in a file which name is based on
the Gene name + the profile suffix.
For instance, if the Gene is named 'gspG' and the suffix is '.hmm3',
then the profile should be placed at the specified location
under the name 'gspG.hmm3'
(default: {msf_def['profile_suffix']})""")

    general_options = parser.add_argument_group(title="General options", description=None)
    general_options.add_argument("-w", "--worker",
                                 action='store',
                                 type=int,
                                 default=None,
                                 help=f"""Number of workers to be used by MacSyFinder.
In the case the user wants to run MacSyFinder in a multi-thread mode.
0 mean that all threads available will be used.
(default: {msf_def['worker']})""")
    general_options.add_argument("-v", "--verbosity",
                                 action="count",
                                 default=0,
                                 help="""Increases the verbosity level. There are 4 levels:
Error messages (default), Warning (-v), Info (-vv) and Debug.(-vvv)""")
    general_options.add_argument("--mute",
                                 action="store_true",
                                 default=False,
                                 help=f"""Mute the log on stdout.
(continue to log on macsyfinder.log)
(default: {msf_def['mute']})""")
    general_options.add_argument("--version",
                                 action="version",
                                 version=get_version_message())
    general_options.add_argument("-l", "--list-models",
                                 action="store_true",
                                 default=False,
                                 help="Displays all models installed at generic location and quit.")
    general_options.add_argument("--cfg-file",
                                 action='store',
                                 help="Path to a MacSyFinder configuration file to be used. "
                                      "(conflict with --previous-run)")
    general_options.add_argument("--previous-run",
                                 action='store',
                                 default=None,
                                 help="""Path to a previous MacSyFinder run directory.
It allows to skip the Hmmer search step on a same dataset,
as it uses previous run results and thus parameters regarding Hmmer detection.
The configuration file from this previous run will be used.
Conflicts with options: --cfg-file, --sequence-db, --profile-suffix, --res-extract-suffix, --e-value-res, --db-type, --hmmer""")
    general_options.add_argument("--relative-path",
                                 action='store_true',
                                 default=False,
                                 help=argparse.SUPPRESS)
    # 'relative-path' option help message (currently hidden)
    # Use relative paths instead of absolute paths. This option is used
    # by developers to generate portable data set, as for example test
    # data set, which are used on many different machines (using previous-run option).
    general_options.add_argument("--timeout",
                                 action='store',
                                 default=None,
                                 type=parse_time,
                                 help="""In some case msf can take a long time to find the best solution
(in 'gembase' and 'ordered_replicon mode'). The timeout is per replicon.
If this step reach the timeout, the replicon is skipped
(for gembase mode the analyse of other replicons continue).
NUMBER[SUFFIX]
NUMBER seconds. SUFFIX may be 's' for seconds (the default), 'm' for minutes, 'h' for hours or 'd' for days
for instance 1h2m3s means 1 hour 2 min 3 sec. NUMBER must be an integer.
""")

    parsed_args = parser.parse_args(args)
    if parsed_args.cfg_file and parsed_args.previous_run:
        # argparse does not allow to have mutually exclusive option in a argument group
        # I prefer to have these 2 options in general options group
        # so I mimic the exclusive_group behavior
        parser.print_usage()
        print("macsyfinder: error: argument --previous-run: not allowed with argument --cfg-file")
        sys.exit(2)
    return parser, parsed_args
def main(args: list[str] | None = None,
         loglevel: typing.Literal['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] | int | None = None):
    """
    Main entry point to MacSyFinder.

    Performs checks (working dir, mandatory options, model registry) before
    launching :func:`macsylib.search_systems.search_systems`, which performs
    the actual search, then writes all result files.

    :param args: the arguments passed on the command line without the program name
    :param loglevel: the output verbosity
    """
    args = sys.argv[1:] if args is None else args
    parser, parsed_args = parse_args(args)

    defaults = MacsyDefaults(package_name='macsyfinder', tool_name='macsyfinder')
    config = Config(defaults, parsed_args)

    if parsed_args.list_models:
        print(list_models(parsed_args), file=sys.stdout)
        sys.exit(0)

    ###########################
    # creation of working dir
    ###########################
    working_dir = config.working_dir()
    if not os.path.exists(working_dir):
        os.makedirs(working_dir)
    else:
        if os.path.isdir(working_dir):
            if config.force_run():
                # --force: wipe a previous run's output before starting over
                shutil.rmtree(working_dir)
                os.makedirs(working_dir)
            elif os.listdir(working_dir):
                raise ValueError(f"'{working_dir}' already exists and is not empty")
        else:
            raise ValueError(f"'{working_dir}' already exists and is not a directory")

    ################
    # init loggers #
    ################
    macsylib.init_logger(log_file=os.path.join(config.working_dir(), config.log_file()),
                         out=not config.mute())
    if not loglevel:
        # logs are specified from args options
        macsylib.logger_set_level(level=config.log_level())
    else:
        # used by unit tests to mute or unmute logs
        macsylib.logger_set_level(level=loglevel)
    logger = logging.getLogger('macsylib.macsyfinder')

    # --models, --sequence-db and --db-type are required unless --previous-run is set
    if not parsed_args.previous_run and not parsed_args.models:
        parser.print_help()
        print()
        sys.tracebacklimit = 0
        raise OptionError("argument --models or --previous-run is required.")
    elif not parsed_args.previous_run and not parsed_args.sequence_db:
        parser.print_help()
        print()
        sys.tracebacklimit = 0
        raise OptionError("argument --sequence-db or --previous-run is required.")
    elif not parsed_args.previous_run and not parsed_args.db_type:
        parser.print_help()
        print()
        sys.tracebacklimit = 0
        raise OptionError("argument --db-type or --previous-run is required.")

    #############################
    # command seems Ok Let's go #
    #############################
    _log.info(get_version_message())
    _log.info(f"command used: {' '.join(sys.argv)}")

    ########################################
    # compute which model I have to search #
    ########################################
    model_registry = ModelRegistry()
    for model_dir in config.models_dir():
        try:
            models_loc_available = scan_models_dir(model_dir,
                                                   profile_suffix=config.profile_suffix(),
                                                   relative_path=config.relative_path())
            for model_loc in models_loc_available:
                model_registry.add(model_loc)
        except PermissionError as err:
            _log.warning(f"{model_dir} is not readable: {err} : skip it.")
    try:
        models_def_to_detect, models_fam_name, models_version = get_def_to_detect(config.models(),
                                                                                 model_registry)
    except KeyError as err:
        sys.exit(f"macsyfinder: {err}")
    _log.info(f"\nmodels used: {models_fam_name}-{models_version}")

    logger.info(f"\n{' Searching systems ':#^70}")
    try:
        all_systems, rejected_candidates = search_systems(config, model_registry, models_def_to_detect, logger)
    except EmptyFileError as err:
        _log.critical(str(err))
        sys.exit(f"macsyfinder: {err} Run Aborted.")

    track_multi_systems_hit = HitSystemTracker(all_systems)
    skipped_replicons = []
    if config.db_type() in ('gembase', 'ordered_replicon'):
        #############################
        # Ordered/Gembase replicons #
        #############################

        ###########################
        # select the best systems #
        ###########################
        logger.info(f"\n{' Computing best solutions ':#^70}")
        all_best_solutions = []
        one_best_solution = []

        # group systems found by replicon
        # before to search best system combination
        # NOTE(review): itertools.groupby requires all_systems to be sorted
        # by replicon_name — presumably search_systems guarantees that; verify.
        for rep_name, syst_group in itertools.groupby(all_systems, key=lambda s: s.replicon_name):
            syst_group = list(syst_group)
            logger.info(f"Computing best solutions for {rep_name} (nb of candidate systems {len(syst_group)})")
            timeout = config.timeout()
            if timeout:
                # in some case best_solution take too much time
                # user can define a timeout, by default set to 0
                signal.signal(signal.SIGALRM, alarm_handler)
                signal.alarm(config.timeout())
                _log.debug(f"set time out to {timeout} sec.")
            try:
                find_best_solutions_start = time.perf_counter()
                best_sol_4_1_replicon, score = find_best_solutions(syst_group)
                find_best_solutions_stop = time.perf_counter()
            except Timeout:
                _log.error(f"The {rep_name} cannot be solved in time skip it!")
                skipped_replicons.append(rep_name)
                continue
            if timeout:
                _log.debug("Cancel the time out.")
                signal.alarm(0)
                signal.signal(signal.SIGALRM, signal.SIG_DFL)
            logger.info(f"It took {find_best_solutions_stop - find_best_solutions_start:.2f}sec to find best solution"
                        f" ({score:.2f}) for replicon {rep_name}")
            # if several solutions are equivalent (same number of systems and same score)
            # store all equivalent solutions in all_best_solutions => all_best_systems
            # pick one in one_best_solution => best_systems
            all_best_solutions.extend(best_sol_4_1_replicon)
            one_best_solution.append(best_sol_4_1_replicon[0])

        ##############################
        # Write the results in files #
        ##############################
        logger.info(f"""\n{f" Writing down results in '{os.path.basename(config.working_dir())}' ":#^70}""")
        system_filename = os.path.join(config.working_dir(), "all_systems.txt")
        tsv_filename = os.path.join(config.working_dir(), "all_systems.tsv")
        with open(system_filename, "w") as sys_file:
            systems_to_txt(models_fam_name, models_version, all_systems, track_multi_systems_hit, sys_file,
                           skipped_replicons=skipped_replicons, header=outfile_header)
        with open(tsv_filename, "w") as tsv_file:
            systems_to_tsv(models_fam_name, models_version, all_systems, track_multi_systems_hit, tsv_file,
                           skipped_replicons=skipped_replicons, header=outfile_header)

        cluster_filename = os.path.join(config.working_dir(), "rejected_candidates.txt")
        with open(cluster_filename, "w") as clst_file:
            rejected_candidates.sort(key=lambda clst: (clst.replicon_name, clst.model, clst.hits))
            rejected_candidates_to_txt(models_fam_name, models_version, rejected_candidates, clst_file,
                                       skipped_replicons=skipped_replicons, header=outfile_header)
        if not (all_systems or rejected_candidates):
            logger.info("No Systems found in this dataset.")

        cluster_filename = os.path.join(config.working_dir(), "rejected_candidates.tsv")
        with open(cluster_filename, "w") as clst_file:
            rejected_candidates_to_tsv(models_fam_name, models_version, rejected_candidates, clst_file,
                                       skipped_replicons=skipped_replicons, header=outfile_header)

        tsv_filename = os.path.join(config.working_dir(), "all_best_solutions.tsv")
        with open(tsv_filename, "w") as tsv_file:
            solutions_to_tsv(models_fam_name, models_version, all_best_solutions, track_multi_systems_hit, tsv_file,
                             skipped_replicons=skipped_replicons, header=outfile_header)

        best_solution_filename = os.path.join(config.working_dir(), "best_solution.tsv")
        with open(best_solution_filename, "w") as best_solution_file:
            # flatten the one-solution-per-replicon list into a list of systems
            one_best_solution = [syst for sol in one_best_solution for syst in sol]
            one_best_solution.sort(key=lambda syst: (syst.replicon_name, syst.position[0],
                                                     syst.model.fqn, - syst.score))
            systems_to_tsv(models_fam_name, models_version, one_best_solution, track_multi_systems_hit,
                           best_solution_file, skipped_replicons=skipped_replicons, header=outfile_header)

        loners_filename = os.path.join(config.working_dir(), "best_solution_loners.tsv")
        with open(loners_filename, "w") as loners_file:
            loners_to_tsv(models_fam_name, models_version, one_best_solution, loners_file,
                          header=outfile_header)

        multisystems_filename = os.path.join(config.working_dir(), "best_solution_multisystems.tsv")
        with open(multisystems_filename, "w") as multisystems_file:
            multisystems_to_tsv(models_fam_name, models_version, one_best_solution, multisystems_file,
                                header=outfile_header)

        summary_filename = os.path.join(config.working_dir(), "best_solution_summary.tsv")
        with open(summary_filename, "w") as summary_file:
            models_fqn = [m.fqn for m in models_def_to_detect]
            replicons_names = get_replicon_names(config.sequence_db(), config.db_type())
            summary_best_solution(models_fam_name, models_version, best_solution_filename, summary_file,
                                  models_fqn, replicons_names,
                                  skipped_replicons=skipped_replicons, header=outfile_header)
    else:
        #######################
        # Unordered replicons #
        #######################

        ##############################
        # Write the results in files #
        ##############################
        logger.info(f"""\n{f" Writing down results in '{os.path.basename(config.working_dir())}' ":#^70}""")
        system_filename = os.path.join(config.working_dir(), "all_systems.txt")
        with open(system_filename, "w") as sys_file:
            likely_systems_to_txt(models_fam_name, models_version, all_systems, track_multi_systems_hit,
                                  sys_file)

        # forbidden = [s for s in all_systems if s.forbidden_occ]
        # system_filename = os.path.join(config.working_dir(), "forbidden_components.tsv")
        # with open(system_filename, "w") as sys_file:
        #     likely_systems_to_tsv(forbidden, track_multi_systems_hit, sys_file)

        system_filename = os.path.join(config.working_dir(), "all_systems.tsv")
        with open(system_filename, "w") as sys_file:
            likely_systems_to_tsv(models_fam_name, models_version, all_systems, track_multi_systems_hit,
                                  sys_file, header=outfile_header)

        cluster_filename = os.path.join(config.working_dir(), "uncomplete_systems.txt")
        with open(cluster_filename, "w") as clst_file:
            unlikely_systems_to_txt(models_fam_name, models_version, rejected_candidates, clst_file,
                                    header=outfile_header)

        if not (all_systems or rejected_candidates):
            logger.info("No Systems found in this dataset.")

    if skipped_replicons:
        for rep_name in skipped_replicons:
            _log.error(f"The replicon {rep_name} cannot be solved before timeout. SKIP IT.")
    logger.info("END")
if __name__ == "__main__":
    # script entry point: delegate to main() with the default sys.argv handling
    main()