python ./lib/train.py path notes: os.getcwd() vs os.path.dirname(__file__)

This article explains how to use Python's os module to obtain the current working directory and the script's own location. A small example demonstrates the difference between os.getcwd() and os.path.dirname(__file__), which is the key to understanding file-path handling in scripts.


When you run python ./lib/train.py from the terminal (that is, from the project root), the code below prints the following:


import os
print(os.getcwd())               # the project root: the directory you launched Python from
print(os.path.dirname(__file__)) # ./lib: the directory that contains this .py script
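Because os.getcwd() depends on where the interpreter was launched rather than on where the script lives, opening files with paths relative to the working directory breaks as soon as the script is run from another directory. A minimal sketch of anchoring paths to the script itself (the data/config.yaml file name is only an illustration):

import os

# Absolute directory containing this .py file, independent of the current working directory;
# abspath() matters because __file__ itself can be a relative path such as ./lib/train.py
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

# Build paths relative to the script instead of relative to os.getcwd()
config_path = os.path.join(SCRIPT_DIR, "data", "config.yaml")
print(config_path)

pathlib expresses the same idea with Path(__file__).resolve().parent.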

 

A related question: running the KEPT project's preprocessing script (train_trace_rapt.py) and then its evaluation script (eval_trace_rapt.py) fails with two path-related errors:

/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/transformers/utils/generic.py:441: FutureWarning: `torch.utils._pytree._register_pytree_node` is deprecated. Please use `torch.utils._pytree.register_pytree_node` instead.
  _torch_pytree._register_pytree_node(
/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/transformers/utils/generic.py:309: FutureWarning: `torch.utils._pytree._register_pytree_node` is deprecated. Please use `torch.utils._pytree.register_pytree_node` instead.
  _torch_pytree._register_pytree_node(
hornetq
Traceback (most recent call last):
  File "/home/ls_lunwen/KEPT-main/Kept/trace/main/./train_trace_rapt.py", line 28, in <module>
    main()
  File "/home/ls_lunwen/KEPT-main/Kept/trace/main/./train_trace_rapt.py", line 19, in main
    preprocess_dataset(args.data_dir, args.project)
  File "/home/ls_lunwen/KEPT-main/Kept/trace/main/../../common/data_processing.py", line 314, in preprocess_dataset
    process_project(base_dir, project)
  File "/home/ls_lunwen/KEPT-main/Kept/trace/main/../../common/data_processing.py", line 301, in process_project
    bug_reports_pd, links_pd, file_diffs_pd = process_data(issue_path, commit_path, data_type)
  File "/home/ls_lunwen/KEPT-main/Kept/trace/main/../../common/data_processing.py", line 205, in process_data
    issue_pd = pd.read_csv(issue_path)
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/pandas/io/parsers/readers.py", line 912, in read_csv
    return _read(filepath_or_buffer, kwds)
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/pandas/io/parsers/readers.py", line 577, in _read
    parser = TextFileReader(filepath_or_buffer, **kwds)
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/pandas/io/parsers/readers.py", line 1407, in __init__
    self._engine = self._make_engine(f, self.engine)
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/pandas/io/parsers/readers.py", line 1661, in _make_engine
    self.handles = get_handle(
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/pandas/io/common.py", line 859, in get_handle
    handle = open(
FileNotFoundError: [Errno 2] No such file or directory: '/home/yueli/HuYworks1/kept/input_data/raw/issue/hornetq.csv'

The evaluation script prints the same two FutureWarnings and then fails while loading the local tokenizer:

eval parameters %s Namespace(project='hornetq', code_kg_mode='inner', data_dir='/home/yueli/HuYworks1/kept/input_data/', model_path='./output/hornetq', no_cuda=False, test_num=None, output_dir='./result/', overwrite=False, code_bert='../unixCoder', chunk_query_num=-1, per_gpu_eval_batch_size=32, code_kg_location='/home/yueli/HuYworks1/kept/input_data/hornetq/', text_kg_location='/home/yueli/HuYworks1/kept/input_data/hornetq/', length_limit=256, tqdm_interval=300.0, data_name='hornetq')
Traceback (most recent call last):
  File "/home/ls_lunwen/KEPT-main/Kept/trace/main/eval_trace_rapt.py", line 25, in <module>
    model = Rapt(BertConfig(), args.code_bert)
  File "/home/ls_lunwen/KEPT-main/Kept/trace/main/../../common/models.py", line 84, in __init__
    self.ctokneizer = AutoTokenizer.from_pretrained(cbert_model, local_files_only=True)
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/transformers/models/auto/tokenization_auto.py", line 752, in from_pretrained
    config = AutoConfig.from_pretrained(
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py", line 1082, in from_pretrained
    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/transformers/configuration_utils.py", line 644, in get_config_dict
    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/transformers/configuration_utils.py", line 699, in _get_config_dict
    resolved_config_file = cached_file(
  File "/home/ls_lunwen/anaconda3/envs/llm/lib/python3.11/site-packages/transformers/utils/hub.py", line 360, in cached_file
    raise EnvironmentError(
OSError: ../unixCoder does not appear to have a file named config.json. Checkout 'https://huggingface.co/../unixCoder/None' for available files.
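Both failures above are path problems rather than code bugs: data_dir points at /home/yueli/HuYworks1/kept/input_data/, a directory from the original author's machine, and code_bert='../unixCoder' is resolved relative to the current working directory and must contain a locally downloaded model with a config.json. A small, assumed pre-flight check (the paths simply mirror the Namespace printed above; adjust them to local copies) that fails fast with a clear message:

import os
import sys

# Paths copied from the Namespace above; replace them with locations that exist on this machine
required_paths = [
    "/home/yueli/HuYworks1/kept/input_data/raw/issue/hornetq.csv",  # issue CSV read by preprocess_dataset
    "../unixCoder/config.json",  # local unixCoder model used by AutoTokenizer.from_pretrained
]

for path in required_paths:
    if not os.path.exists(path):
        sys.exit(f"Missing required path: {path} (update data_dir / code_bert before running)")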
Another related question: the script below splits images and YOLO-format label files into training, validation, and test sets, but crashes while copying files.

# Split images and their label files into training / validation / test sets by ratio
import shutil
import random
import os

# Source paths
image_original_path = "data/images/"
label_original_path = "data/labels/"
cur_path = os.getcwd()
# cur_path = 'D:/image_denoising_test/denoise/'

# Training set paths
train_image_path = os.path.join(cur_path, "datasets/images/train/")
train_label_path = os.path.join(cur_path, "datasets/labels/train/")
# Validation set paths
val_image_path = os.path.join(cur_path, "datasets/images/val/")
val_label_path = os.path.join(cur_path, "datasets/labels/val/")
# Test set paths
test_image_path = os.path.join(cur_path, "datasets/images/test/")
test_label_path = os.path.join(cur_path, "datasets/labels/test/")

# Text files listing the images in each split
list_train = os.path.join(cur_path, "datasets/train.txt")
list_val = os.path.join(cur_path, "datasets/val.txt")
list_test = os.path.join(cur_path, "datasets/test.txt")

train_percent = 0.8
val_percent = 0.1
test_percent = 0.1

def del_file(path):
    for i in os.listdir(path):
        file_data = path + "\\" + i
        os.remove(file_data)

def mkdir():
    if not os.path.exists(train_image_path):
        os.makedirs(train_image_path)
    else:
        del_file(train_image_path)
    if not os.path.exists(train_label_path):
        os.makedirs(train_label_path)
    else:
        del_file(train_label_path)
    if not os.path.exists(val_image_path):
        os.makedirs(val_image_path)
    else:
        del_file(val_image_path)
    if not os.path.exists(val_label_path):
        os.makedirs(val_label_path)
    else:
        del_file(val_label_path)
    if not os.path.exists(test_image_path):
        os.makedirs(test_image_path)
    else:
        del_file(test_image_path)
    if not os.path.exists(test_label_path):
        os.makedirs(test_label_path)
    else:
        del_file(test_label_path)

def clearfile():
    if os.path.exists(list_train):
        os.remove(list_train)
    if os.path.exists(list_val):
        os.remove(list_val)
    if os.path.exists(list_test):
        os.remove(list_test)

def main():
    mkdir()
    clearfile()

    file_train = open(list_train, 'w')
    file_val = open(list_val, 'w')
    file_test = open(list_test, 'w')

    total_txt = os.listdir(label_original_path)
    num_txt = len(total_txt)
    list_all_txt = range(num_txt)

    num_train = int(num_txt * train_percent)
    num_val = int(num_txt * val_percent)
    num_test = num_txt - num_train - num_val

    train = random.sample(list_all_txt, num_train)
    # train takes num_train random indices from list_all_txt;
    # the remaining indices are candidates for val and test
    val_test = [i for i in list_all_txt if not i in train]
    # take num_val indices from val_test; whatever is left becomes the test set
    val = random.sample(val_test, num_val)
    print("Training set size: {}, validation set size: {}, test set size: {}".format(
        len(train), len(val), len(val_test) - len(val)))

    for i in list_all_txt:
        name = total_txt[i][:-4]
        srcImage = image_original_path + name + '.jpg'
        srcLabel = label_original_path + name + ".txt"
        if i in train:
            dst_train_Image = train_image_path + name + '.jpg'
            dst_train_Label = train_label_path + name + '.txt'
            shutil.copyfile(srcImage, dst_train_Image)
            shutil.copyfile(srcLabel, dst_train_Label)
            file_train.write(dst_train_Image + '\n')
        elif i in val:
            dst_val_Image = val_image_path + name + '.jpg'
            dst_val_Label = val_label_path + name + '.txt'
            shutil.copyfile(srcImage, dst_val_Image)
            shutil.copyfile(srcLabel, dst_val_Label)
            file_val.write(dst_val_Image + '\n')
        else:
            dst_test_Image = test_image_path + name + '.jpg'
            dst_test_Label = test_label_path + name + '.txt'
            shutil.copyfile(srcImage, dst_test_Image)
            shutil.copyfile(srcLabel, dst_test_Label)
            file_test.write(dst_test_Image + '\n')

    file_train.close()
    file_val.close()
    file_test.close()

if __name__ == "__main__":
    main()

Running the code above raises the following error:

Traceback (most recent call last):
  File "C:\Users\Administrator\ultralytics\trainTest.py", line 133, in <module>
    main()
  File "C:\Users\Administrator\ultralytics\trainTest.py", line 111, in main
    shutil.copyfile(srcImage, dst_train_Image)
  File "D:\software\anaconda3\Lib\shutil.py", line 260, in copyfile
    with open(src, 'rb') as fsrc:
FileNotFoundError: [Errno 2] No such file or directory: 'data/images/classes.jpg'
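The missing file in that traceback is data/images/classes.jpg. The loop derives image names from every .txt file returned by os.listdir(label_original_path), and a YOLO-style labels folder typically also contains a classes.txt file that has no matching image; in addition, data/images/ and data/labels/ are relative paths, so the script only works when launched from the directory that contains data/. A small sketch of a defensive fix (assuming .jpg images, as in the script above): build the label list only from files that actually have a matching image.

import os

image_original_path = "data/images/"
label_original_path = "data/labels/"

# Keep only label files with a matching image; this drops classes.txt and any orphaned labels
total_txt = [
    f for f in os.listdir(label_original_path)
    if f.endswith(".txt") and os.path.exists(os.path.join(image_original_path, f[:-4] + ".jpg"))
]
print(f"{len(total_txt)} usable image/label pairs found")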
A third question: calling pd.read_csv on a CSV stored on the Windows desktop raises FileNotFoundError in a Jupyter notebook.

FileNotFoundError                         Traceback (most recent call last)
Cell In[79], line 6
      3 DATA_PATH = "C:/Users/wxn/Desktop/2014.csv"
      5 # load the data
----> 6 X, y = load_data(DATA_PATH)
      8 if X is not None and y is not None:
      9     # train and evaluate the model
     10     model = train_evaluate_svm(X, y)

Cell In[76], line 2, in load_data(file_path)
      1 def load_data(file_path):
----> 2     df = pd.read_csv(R"C:\Users\wxn\Desktop\2014.csv")
      3     print(f"Data loaded successfully! Shape: {df.shape}")
      4     print(f"Feature columns: {list(df.columns[:-1])}")

File /opt/conda/envs/anaconda-2024.02-py310/lib/python3.10/site-packages/pandas/io/parsers/readers.py:948, in read_csv(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, date_format, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, encoding_errors, dialect, on_bad_lines, delim_whitespace, low_memory, memory_map, float_precision, storage_options, dtype_backend)
    935 kwds_defaults = _refine_defaults_read(
    936     dialect,
    937     delimiter,
   (...)
    944     dtype_backend=dtype_backend,
    945 )
    946 kwds.update(kwds_defaults)
--> 948 return _read(filepath_or_buffer, kwds)

File /opt/conda/envs/anaconda-2024.02-py310/lib/python3.10/site-packages/pandas/io/parsers/readers.py:611, in _read(filepath_or_buffer, kwds)
    608 _validate_names(kwds.get("names", None))
    610 # Create the parser.
--> 611 parser = TextFileReader(filepath_or_buffer, **kwds)
    613 if chunksize or iterator:
    614     return parser

File /opt/conda/envs/anaconda-2024.02-py310/lib/python3.10/site-packages/pandas/io/parsers/readers.py:1448, in TextFileReader.__init__(self, f, engine, **kwds)
   1445     self.options["has_index_names"] = kwds["has_index_names"]
   1447 self.handles: IOHandles | None = None
--> 1448 self._engine = self._make_engine(f, self.engine)

File /opt/conda/envs/anaconda-2024.02-py310/lib/python3.10/site-packages/pandas/io/parsers/readers.py:1705, in TextFileReader._make_engine(self, f, engine)
   1703     if "b" not in mode:
   1704         mode += "b"
--> 1705 self.handles = get_handle(
   1706     f,
   1707     mode,
   1708     encoding=self.options.get("encoding", None),
   1709     compression=self.options.get("compression", None),
   1710     memory_map=self.options.get("memory_map", False),
   1711     is_text=is_text,
   1712     errors=self.options.get("encoding_errors", "strict"),
   1713     storage_options=self.options.get("storage_options", None),
   1714 )
   1715 assert self.handles is not None
   1716 f = self.handles.handle

File /opt/conda/envs/anaconda-2024.02-py310/lib/python3.10/site-packages/pandas/io/common.py:863, in get_handle(path_or_buf, mode, encoding, compression, memory_map, is_text, errors, storage_options)
    858 elif isinstance(handle, str):
    859     # Check whether the filename is to be opened in binary mode.
    860     # Binary mode does not support 'encoding' and 'newline'.
    861     if ioargs.encoding and "b" not in ioargs.mode:
    862         # Encoding
--> 863         handle = open(
    864             handle,
    865             ioargs.mode,
    866             encoding=ioargs.encoding,
    867             errors=errors,
    868             newline="",
    869         )
    870     else:
    871         # Binary mode
    872         handle = open(handle, ioargs.mode)

FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\wxn\\Desktop\\2014.csv'

How should a problem like this be solved?
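The frames under /opt/conda/envs/anaconda-2024.02-py310/ show that this notebook runs on a Linux server, so a Windows path such as C:\Users\wxn\Desktop\2014.csv cannot exist there: the CSV first has to be uploaded to the machine that executes the notebook, and load_data should use its file_path argument instead of a hard-coded string. A short sketch under those assumptions (the server-side path in the comment is hypothetical):

import os

import pandas as pd

def load_data(file_path):
    # Fail early with a clear message if the file is not visible to the notebook server
    if not os.path.exists(file_path):
        raise FileNotFoundError(
            f"{file_path} not found on this machine; upload the CSV and pass its server-side path"
        )
    df = pd.read_csv(file_path)  # use the argument, not a hard-coded Windows path
    return df

# Example (hypothetical server-side location after uploading the file):
# df = load_data("/home/jovyan/2014.csv")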