Differences between process.cwd(), __dirname, and __filename

Core Node.js concepts explained

process.cwd(): returns the working directory of the current Node.js process, i.e. the directory from which the process was launched.
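For example (a minimal sketch; the directory names below are hypothetical), the same one-line script prints a different value depending on where the process is started from:

// cwd-demo.js
console.log(process.cwd());

// $ cd /home/user/project && node cwd-demo.js    -> /home/user/project
// $ cd /home/user && node project/cwd-demo.js    -> /home/user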

__dirname: the directory name of the current module, equivalent to path.dirname(__filename). __dirname is not actually a global variable; it is local to each module.

__filename: the full absolute path, including the file name, of the file currently being executed.
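To contrast the three, here is a minimal sketch; the paths in the comments are hypothetical and assume the module lives at /home/user/project/src/app.js and the process is started from /home/user/project:

// /home/user/project/src/app.js  (hypothetical location)
const path = require('path');

console.log(process.cwd());             // /home/user/project            (launch directory)
console.log(__dirname);                 // /home/user/project/src        (directory of this module)
console.log(__filename);                // /home/user/project/src/app.js (absolute path of this module)
console.log(path.dirname(__filename));  // /home/user/project/src        (same as __dirname)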
