shutil 模块:文件的复制与删除,以及模型训练脚本示例

本文提供了一个使用Python的shutil模块进行文件夹复制的例子。具体展示了如何从一个路径复制整个目录到另一个路径。
#!/usr/bin/env python
"""Back up a directory tree with shutil.copytree.

Example from the article: recursively copy /home/aircoder/python/api
to /home/aircoder/python/bakshutil.
"""
import shutil


def backup_tree(src, dst):
    """Recursively copy directory *src* to *dst* and return the destination path.

    Raises FileExistsError if *dst* already exists (shutil.copytree default);
    pass a fresh destination path for each backup.
    """
    return shutil.copytree(src, dst)


if __name__ == '__main__':
    # Original example paths from the article.
    backup_tree('/home/aircoder/python/api', '/home/aircoder/python/bakshutil')
"""Hydra-configured training entry point for the deepcrf models.

Reconstructed from a whitespace-collapsed paste; logic preserved except for
the one FIX noted inline. Depends on project-local packages `utiles` and
`model` plus hydra/omegaconf, none of which are shown here.
"""
from utiles.dataset import *
from utiles.trainer import *
import model.utils as model_utils
import torch
import os
import shutil
import datetime
import hydra
from omegaconf import OmegaConf
import logging


def set_logger(path):
    """Attach a file handler (at *path*) and a console handler to the root logger.

    Idempotent: handlers are only added when the root logger has none yet.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        # Logging to a file
        file_handler = logging.FileHandler(path)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s:%(levelname)s:' ' %(message)s'))
        logger.addHandler(file_handler)
        # Logging to console
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(stream_handler)


@hydra.main(version_base=None, config_path="config", config_name="config_train")
def main(args: OmegaConf):
    """Build datasets/model from the hydra config and run training.

    Side effects: creates a timestamped checkpoint directory, writes the
    resolved config and a process.log there, and (re)moves stale
    checkpoint/log directories.
    """
    # Timestamped run directory so repeated runs never collide.
    nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S')
    args.checkpoint_dir = args.checkpoint_dir + '{}'.format(nowtime) + '/'
    if not os.path.exists(os.path.abspath(args.checkpoint_dir)):
        os.makedirs(os.path.abspath(args.checkpoint_dir))
    else:
        shutil.rmtree(args.checkpoint_dir, ignore_errors=True)
        shutil.rmtree(args.log_dir, ignore_errors=True)
        # FIX: the original removed checkpoint_dir here but never recreated
        # it, so the FileHandler opened by set_logger() below would raise
        # FileNotFoundError. Recreate the directory after clearing it.
        os.makedirs(os.path.abspath(args.checkpoint_dir), exist_ok=True)
    set_logger(os.path.join(args.checkpoint_dir, 'process.log'))
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    # Persist the resolved config alongside the checkpoints.
    OmegaConf.save(config=args, f=os.path.join(args.checkpoint_dir, 'conf.yaml'))

    traindataset, validate_dataset, testdataset = get_datasets(args)
    train_dataloader = torch.utils.data.DataLoader(
        traindataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers, worker_init_fn=worker_init_fn)
    val_dataloder = torch.utils.data.DataLoader(
        validate_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.num_workers, worker_init_fn=worker_init_fn)
    test_dataloder = torch.utils.data.DataLoader(
        testdataset, batch_size=args.batch_size)

    model, criterion, optimizer = get_model_loss_optimizer(args)
    if args.Model == 'deepcrf-con':
        if args.load_model is not None:
            print("Continuing training full model from checkpoint "
                  + str(args.load_model))
            state = model_utils.load_model(model, optimizer, args.load_model,
                                           args.cuda)
        else:
            # Two-stage training: first the 'deepcrf-con' model, then reload
            # its best checkpoint into a plain 'deepcrf' model with
            # cross-entropy loss and train again.
            state = training(train_dataloader, val_dataloder, test_dataloder,
                             model, optimizer, criterion, args)
            args.Model = 'deepcrf'
            args.loss = 'cross'
            model, criterion, optimizer = get_model_loss_optimizer(args)
            state = model_utils.load_model(model, None,
                                           state["best_checkpoint"], args.cuda)
            state = training(train_dataloader, val_dataloder, test_dataloder,
                             model, optimizer, criterion, args)
    else:
        if args.load_model is not None:
            print("Continuing training full model from checkpoint "
                  + str(args.load_model))
            state = model_utils.load_model(model, None, args.load_model,
                                           args.cuda)
            if args.cuda:
                model = model.cuda()
            state = training(train_dataloader, val_dataloder, test_dataloder,
                             model, optimizer, criterion, args)
        else:
            state = training(train_dataloader, val_dataloder, test_dataloder,
                             model, optimizer, criterion, args)


if __name__ == '__main__':
    main()

# Original author's question (translated from the page): "Can this code run
# on the CPU? If so, which parameters need to be changed?" — presumably the
# config's `cuda` flag and `num_workers`; verify against the hydra config.
09-17
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值