3. The LoadConfig and LoadProfile Functions

This article examines the Configure struct, a C construct that plays a role similar to a C++ class and is used here to manipulate fcitx's configuration files. LoadConfig checks for the .fcitx directory and config file, creating them when necessary, and reads their contents into memory. Writing is handled by SaveConfig, which may involve special processing such as hotkey conversion. The author suggests using an off-the-shelf library, or at least isolating the parsing code in an independent module, to reduce coupling.

I. Reading the Configuration File

/*
 * Load the user's configuration file
 */
void LoadConfig (Bool bMode)
{
    FILE    *fp;
    char    buf[PATH_MAX], *pbuf, *pbuf1;
    Bool    bFromUser = True;// Marks whether the config file came from the user's home directory or was copied over from the installation directory.
    int     group_idx, i;
    Configure   *tmpconfig;// See note [1]

    bIsReloadConfig = bMode;// A global variable, defined in "src/tool.c"[193]

    pbuf = getenv("HOME");// Get the absolute path of the current user's home directory from the environment
    if(!pbuf){
        fprintf(stderr, "error: get environment variable HOME\n");
        exit(1);    // Actually, I think we would not have to exit here; it is just awkward to handle.
                    // We would have to fall back to the config file in the installation directory, and we could not write the configuration back. Exiting outright is the simple option.
    }
    snprintf(buf, PATH_MAX, "%s/.fcitx/config", pbuf);// Build the absolute path of the configuration file

    fp = fopen(buf, "r");// Open the configuration file; if that fails, it does not exist yet, so copy the default one from the installation directory.
    if(!fp && errno == ENOENT){ /* $HOME/.fcitx/config does not exist */
        snprintf(buf, PATH_MAX, PKGDATADIR "/data/config");
        bFromUser = False;
        fp = fopen(buf, "r");
        if(!fp){
            perror("fopen");
            exit(1);// If the installation directory has no config file either, nothing can be done except telling the user that fcitx cannot run.
        }
    }

    if(!bFromUser) /* create default configure file */
        SaveConfig();// See note [2]

    group_idx = -1;// Index of the current group; the configuration file is organized in groups, much like the sections of an INI file.

    /* FIXME: perhaps a more appropriate buffer length should be used */
    while(fgets(buf, PATH_MAX, fp)){// Read at most PATH_MAX bytes per call.
        i = strlen(buf);
        if(buf[i-1] != '\n'){// This check means a line of fcitx's configuration file may hold at most PATH_MAX characters.
                            // A longer line makes the program exit, which is also why the author left the FIXME above.
            fprintf(stderr, "error: configure file: line length\n");
            exit(1);
        }else
            buf[i-1] = '\0';// Strip the trailing newline. fgets has already null-terminated the buffer,
                            // so this simply replaces the '\n' before the parsing below.

        pbuf = buf;
        while(*pbuf && isspace(*pbuf))// Advance pbuf to the first non-whitespace character
            pbuf++;
        if(!*pbuf || *pbuf == '#')// Skip the line if it is empty or a comment (a line beginning with '#')
            continue;

        if(*pbuf == '['){ /* get a group name; groups are written as "[name]" */
            pbuf++;
            pbuf1 = strchr(pbuf, ']');
            if(!pbuf1){
                fprintf(stderr, "error: configure file: configure group name\n");
                exit(1);
            }
            // Look up the group's index in the global array configure_groups by its name
            group_idx = -1;
            for(i = 0; configure_groups[i].name; i++)
                if(strncmp(configure_groups[i].name, pbuf, pbuf1-pbuf) == 0){
                    group_idx = i;
                    break;
                }
            if(group_idx < 0){// In my view there is no need to exit here. The error could simply be ignored, along with the rest of this group's entries.
                            // Exiting only hurts extensibility: once a new group is added, an older binary can no longer use the newer configuration file.
                            // The same goes for an optional extension that introduces a group of its own.
                            // So this should be a warning, not a fatal error.
                fprintf(stderr, "error: invalid configure group name\n");
                exit(1);
            }
            continue;
        }

        // pbuf1 points at the '=' that separates the entry name from its value
        pbuf1 = strchr(pbuf, '=');
        if(!pbuf1){// As above, this too should be a warning rather than an error that exits.
            fprintf(stderr, "error: configure file: configure entry name\n");
            exit(1);
        }

        // This guards against entries (lines of the form "name=value") appearing before
        // any group header, i.e. it rejects "entry1" and "entry2" in a layout like this:
        ////////////////////////////////////////////
        //# file header
        //entry1=123
        //entry2=123
        //[group name]
        //...
        //# file footer
        ////////////////////////////////////////////
        if(group_idx < 0){
            fprintf(stderr, "error: configure file: no group name at beginning\n");
            exit(1);
        }

        // Find the matching entry in this group and store the value in the corresponding global variable
        for(tmpconfig = configure_groups[group_idx].configure;
                tmpconfig->name; tmpconfig++)
        {
            if(strncmp(tmpconfig->name, pbuf, pbuf1-pbuf) == 0)
                read_configure(tmpconfig, ++pbuf1);
        }
    }

    fclose(fp);

    // Ctrl+Space is fcitx's toggle hotkey; a structure is built here for later use. Why it is done this way is not yet clear; reading on should reveal the author's intent.
    if (!Trigger_Keys) {
        iTriggerKeyCount = 0;
        Trigger_Keys = (XIMTriggerKey *) malloc (sizeof (XIMTriggerKey) * (iTriggerKeyCount + 2));
        Trigger_Keys[0].keysym = XK_space;
        Trigger_Keys[0].modifier = ControlMask;
        Trigger_Keys[0].modifier_mask = ControlMask;
        Trigger_Keys[1].keysym = 0;
        Trigger_Keys[1].modifier = 0;
        Trigger_Keys[1].modifier_mask = 0;
    }
}
// Compared with LoadConfig, LoadProfile is far simpler, since it has no notion of groups to track.
void LoadProfile (void)
{
    FILE           *fp;
    char            buf[PATH_MAX], *pbuf, *pbuf1;
    int             i;
    Configure       *tmpconfig;
    // First set the window positions to the built-in defaults; if the profile file provides values they are read below, otherwise these defaults stay in effect.
    iMainWindowX = MAINWND_STARTX;        // main window X position
    iMainWindowY = MAINWND_STARTY;        // main window Y position
    iInputWindowX = INPUTWND_STARTX;    // input window X position
    iInputWindowY = INPUTWND_STARTY;    // input window Y position

    pbuf = getenv("HOME");
    if(!pbuf){
        fprintf(stderr, "error: get environment variable HOME\n");
        exit(1);
    }
    snprintf(buf, PATH_MAX, "%s/.fcitx/profile", pbuf);

    fp = fopen(buf, "r");
    if(!fp){
        if(errno == ENOENT)
            SaveProfile();
        return;
    }

    /* FIXME: perhaps a more appropriate buffer length should be used */
    while(fgets(buf, PATH_MAX, fp)){
        i = strlen(buf);
        if(buf[i-1] != '\n'){
            fprintf(stderr, "error: profile file: line length\n");
            exit(1);
        }else
            buf[i-1] = '\0';

        pbuf = buf;
        while(*pbuf && isspace(*pbuf))
            pbuf++;
        if(!*pbuf || *pbuf == '#')
            continue;

        pbuf1 = strchr(pbuf, '=');
        if(!pbuf1){
            fprintf(stderr, "error: profile file: configure entry name\n");
            exit(1);
        }

        for(tmpconfig = profiles; tmpconfig->name; tmpconfig++)
            if(strncmp(tmpconfig->name, pbuf, pbuf1-pbuf) == 0)
                read_configure(tmpconfig, ++pbuf1);
    }
    fclose(fp);

    // Save the configuration if it needs saving. When exactly is that needed?
    if(bIsNeedSaveConfig){
        SaveConfig();
        SaveProfile();
    }
}
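For reference, the profile file is just flat "name=value" lines with no group headers. A minimal example of the shape (the entry names below are made up for illustration; they are not fcitx's actual keys):

    # profile written by SaveProfile
    MainWindowX=0
    MainWindowY=0
    InputWindowX=300
    InputWindowY=300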

Notes:
1. Configure is a structure the author defined for operating on the configuration file. Generally a struct is said to hold data, yet here I call it a structure that operates on data. Is that a mistake? No. This struct has quite a bit of the flavor of a C++ class: it carries both data and the function that processes that data, except that instead of a member function it holds a function pointer.

In fact, this idiom is very common in C. It is especially prominent in the virtual file system of the Linux kernel, and the FILE type we use all the time is a structure of the same kind.

I will not go into Configure in more detail here; it is defined in "src/tool.c"[225], with thorough comments.
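Still, to make the note concrete, here is a rough sketch of what such a structure can look like, reconstructed from how the code above uses it. It is for illustration only; the real definition in "src/tool.c" differs in its field names and types:

    /* Illustrative reconstruction -- not the actual definition in src/tool.c. */
    typedef enum {
        CONFIG_INTEGER,
        CONFIG_STRING,
        CONFIG_COLOR
    } ConfigValueType;

    typedef struct _Configure Configure;
    struct _Configure {
        char            *name;        /* entry name, the part before '=' */
        char            *comment;     /* optional comment written above the entry */
        ConfigValueType  value_type;  /* selects one of the generic readers/writers */
        void            *value;       /* points at the global variable holding the value */
        /* Optional dedicated reader/writer; the third argument selects the
         * direction (0 appears to mean "write" in write_configures, shown in
         * the next section). */
        int            (*config_rw)(Configure *cfg, FILE *fp, int mode);
    };

The data (name, value) and the behavior (config_rw) travel together, which is exactly the class-like flavor described above.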
2. At this point nothing has been read from the installation directory yet, and all SaveConfig() does is:
    (1) check whether the .fcitx directory exists, and create it if not;
    (2) check whether the config file exists, and create it if not;
    (3) write the data held in the global variable configure_groups into the configuration file. The way the file is written involves a small trick, which I explain in detail in the section "Writing the Configuration File".

Personal opinion:
fcitx arguably has no need to parse its configuration file by hand. Rather than rolling its own parser, it could use an off-the-shelf library; there should be plenty of them around. Even if it keeps its own reader and writer, that code ought to be isolated in a separate module, for example packaged as a standalone .so. The coupling would be much lower and upgrades would be far easier.
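If the parsing were split out as suggested, the public surface of such a module could be no more than a handful of functions. The header below is a sketch of one possible interface; it is hypothetical, not anything that exists in fcitx:

    /* config_io.h -- hypothetical interface for an isolated config module */
    #ifndef CONFIG_IO_H
    #define CONFIG_IO_H

    typedef struct ConfigFile ConfigFile;   /* opaque handle */

    /* Parse an INI-style file; returns NULL on error. */
    ConfigFile *config_open(const char *path);

    /* Look up "key" inside "[group]"; returns def if the entry is absent. */
    const char *config_get(ConfigFile *cf, const char *group,
                           const char *key, const char *def);

    /* Write the (possibly modified) configuration back to disk. */
    int config_save(ConfigFile *cf, const char *path);

    void config_close(ConfigFile *cf);

    #endif /* CONFIG_IO_H */

With an interface like this, LoadConfig would shrink to a loop of config_get calls, and a newer file with unknown groups would degrade gracefully instead of aborting.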

II. Writing the Configuration File

As for writing the configuration, I will skip SaveProfile and only briefly analyze the SaveConfig function.

/*
 * Save the configuration
 */
void SaveConfig (void)
{
    FILE    *fp;
    char    buf[PATH_MAX], *pbuf;
    Configure_group *tmpgroup;

    pbuf = getenv("HOME");
    if(!pbuf){
        fprintf(stderr, "error: get environment variable HOME\n");
        exit(1);
    }

    snprintf(buf, PATH_MAX, "%s/.fcitx", pbuf);
    if(mkdir(buf, S_IRWXU) < 0 && errno != EEXIST){
        perror("mkdir");
        exit(1);
    }

    snprintf(buf, PATH_MAX, "%s/.fcitx/config", pbuf);
    fp = fopen (buf, "w");
    if (!fp) {
        perror("fopen");
        exit(1);
    }

    // Writing the configuration file is actually simple: for each group in the global array configure_groups, write its entries out to the file.
    for(tmpgroup = configure_groups; tmpgroup->name; tmpgroup++){
        if(tmpgroup->comment)// If the group has a comment, write it first
            fprintf(fp, "# %s\n", tmpgroup->comment);
        fprintf(fp, "[%s]\n", tmpgroup->name);// Then write the group name
        write_configures(fp, tmpgroup->configure);// Finally write each of the group's entries
        fprintf(fp, "\n");// A blank line; it serves no purpose other than making the file more readable.
    }
    fclose(fp);
}
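Putting the loop together, the file SaveConfig emits has the following shape (the group and entry names here are placeholders, not fcitx's real ones):

    # comment of the first group
    [FirstGroup]
    # comment of the first entry
    Entry1=value1
    Entry2=value2

    # comment of the second group
    [SecondGroup]
    ...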

/* Write the entries in configures out to fp */
static int write_configures(FILE *fp, Configure *configures)
{
    Configure *tc;

    for(tc = configures; tc->name; tc++){
        if(tc->comment)// If the entry has a comment, write it first
            fprintf(fp, "# %s\n", tc->comment);
        if(tc->config_rw)// See note [1]
            tc->config_rw(tc, fp, 0);
        else{
            switch(tc->value_type){
                case CONFIG_INTEGER:
                    generic_config_integer(tc, fp, 0);
                    break;
                case CONFIG_STRING:
                    generic_config_string(tc, fp, 0);
                    break;
                case CONFIG_COLOR:
                    generic_config_color(tc, fp, 0);
                    break;
                default:
                    fprintf(stderr, "error: shouldn't be here\n");// In my view this should be an assertion:
                                                                  // an unexpected type means a bug in the program, not a runtime error.
                    exit(1);
            }
        }
    }
    return 0;
}

Notes:
1. If an entry has a dedicated read/write function, that function is used to write it. Why not use the generic functions here?

Because the data sometimes needs special processing before it is written to the configuration file, such as converting a hexadecimal color value into a string, or emitting particular symbols such as hotkeys. Here the purpose is presumably to convert between the internal representation of a hotkey and macros such as L_CTRL and R_CTRL.
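As an illustration of such a dedicated handler, the sketch below shows what a hotkey writer could look like. It is a guess at the mechanism rather than fcitx's actual code: the HotKey type, the constants, and the convention that mode 0 means "write" are all assumptions.

    /* Hypothetical dedicated writer for a hotkey entry (illustrative only;
     * the types and constants below are made up, not fcitx's). */
    enum { MOD_LEFT_CTRL = 1, MOD_RIGHT_CTRL = 2 };

    typedef struct {
        int modifier;   /* internal code of the modifier key */
        int keysym;     /* internal code of the main key */
    } HotKey;

    static int hotkey_config_rw(Configure *cfg, FILE *fp, int mode)
    {
        HotKey *hk = (HotKey *) cfg->value;

        if (mode == 0) {    /* assumed: mode 0 means "write" */
            /* Translate the internal modifier code into its textual macro name. */
            fprintf(fp, "%s=%s\n", cfg->name,
                    hk->modifier == MOD_LEFT_CTRL ? "L_CTRL" : "R_CTRL");
        }
        /* A real handler would also implement the reading direction, parsing
         * the text back into the internal representation. */
        return 0;
    }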
