#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/26 13:38
# @Author : Scheaven
# @File : defaults.py
# @description:

"""
|
This file contains components with some default boilerplate logic user may need
|
in training / testing. They will not work for everyone, but many users may find them useful.
|
The behavior of functions/classes in this file is subject to change,
|
since they are meant to represent the "common default behavior" people need in their projects.
|
"""
|
import argparse
import logging
import os
import sys
from collections import OrderedDict

import torch

# import torch.nn.functional as F
# from torch.nn.parallel import DistributedDataParallel

from utils import comm
from utils.env import seed_all_rng
from utils.file_io import PathManager
from utils.logger import setup_logger
from utils.collect_env import collect_env_info

def default_argument_parser():
    """
    Create a parser with some common arguments used by fastreid users.

    Returns:
        argparse.ArgumentParser:
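
    Example (illustrative usage; the training-script name below is only a placeholder)::

        args = default_argument_parser().parse_args()
        # typically invoked as something like:
        #   python <train_script>.py --config-file <config.yml> --num-gpus 1 [--eval-only] [OPTS ...]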
    """
    parser = argparse.ArgumentParser(description="fastreid Training")
    parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
    parser.add_argument(
        "--finetune",
        action="store_true",
        help="whether to attempt to finetune from the trained model",
    )
    parser.add_argument(
        "--resume",
        action="store_true",
        help="whether to attempt to resume from the checkpoint directory",
    )
    parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
    parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
    parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
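    # The image-path arguments below appear to feed a pairwise comparison / demo flow;
    # the exact consumer of --img_a1/--img_a2/--img_b1/--img_b2 lives outside this file.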
parser.add_argument("--img_a1", default="1.jpg", help="input image")
|
parser.add_argument("--img_a2", default="2.jpg", help="input image2")
|
parser.add_argument("--img_b1", default="1.jpg", help="input image")
|
parser.add_argument("--img_b2", default="2.jpg", help="input image2")
|
    parser.add_argument(
        "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
    )

    # PyTorch still may leave orphan processes in multi-gpu training.
    # Therefore we use a deterministic way to obtain the port,
    # so that users are aware of orphan processes by seeing the port occupied.
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
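    # 2 ** 15 + 2 ** 14 = 49152, plus a uid-derived offset in [0, 2 ** 14): a port in [49152, 65535].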
    parser.add_argument("--dist-url", default="tcp://127.0.0.1:{}".format(port))
    parser.add_argument(
        "opts",
        help="Modify config options using the command line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser

def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:

    1. Set up the fastreid logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode): the full config to be used
        args (argparse.Namespace): the command line arguments to be logged
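
    Example (illustrative sketch; ``get_cfg`` is a hypothetical project-level config
    constructor, while ``merge_from_file`` / ``merge_from_list`` assume a yacs-style CfgNode)::

        args = default_argument_parser().parse_args()
        cfg = get_cfg()                        # hypothetical: build the default config
        cfg.merge_from_file(args.config_file)  # yacs CfgNode API
        cfg.merge_from_list(args.opts)
        default_setup(cfg, args)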
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
    logger.info("Environment info:\n" + collect_env_info())

logger.info("Command line arguments: " + str(args))
|
if hasattr(args, "config_file") and args.config_file != "":
|
logger.info(
|
"Contents of args.config_file={}:\n{}".format(
|
args.config_file, PathManager.open(args.config_file, "r").read()
|
)
|
)
|
|
logger.info("Running with full config:\n{}".format(cfg))
|
if comm.is_main_process() and output_dir:
|
# Note: some of our scripts may expect the existence of
|
# config.yaml in output directory
|
path = os.path.join(output_dir, "config.yaml")
|
with PathManager.open(path, "w") as f:
|
f.write(cfg.dump())
|
logger.info("Full config saved to {}".format(os.path.abspath(path)))
|
|
    # make sure each worker ends up with a different seed; no explicit seed is passed here,
    # so seed_all_rng falls back to its own default seeding
    seed_all_rng()

    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # a typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK