[Unfinished] 28 α = β Logic-Controlled Circulating-Current-Free DC Reversible Speed Control System

These notes introduce the logic-controlled circulating-current-free DC reversible speed control system. The motor is fed by two rectifier groups connected in anti-parallel, and a logic controller decides when each group is blocked or released, so that no circulating current can arise and the efficiency of the system is improved.

Logic-controlled circulating-current-free DC reversible speed control system:

  In a reversible DC motor drive fed by two anti-parallel rectifier groups, if the group that is not needed is kept blocked while the other group works, i.e. its firing pulses are cut off so that it cannot conduct, then there is no path for circulating current between the two groups: neither a DC circulating current nor a pulsating circulating current can flow.

  A logic controller (DLC) is normally used to decide, during forward/reverse running and braking, which rectifier group should work (in either the rectifying or the inverting state) and which group should be blocked.

  Since there is no circulating current, and therefore no circulating-current loss, the rectifier rating can be reduced and no circulating-current-limiting reactors are required.

Main circuit:
    Two rectifier groups connected in anti-parallel.

Control circuit:

    It consists of a speed regulator (ASR), current regulators and the logic controller. The two rectifier groups are controlled by two separate current regulators; the input of ACR2, the current regulator of the reverse group VR, passes through an inverting amplifier, which ensures that the control angles of the two groups satisfy α = β. Whether each group works or is blocked is decided by the logic controller.
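    The α = β relation can be checked with a small numerical sketch. It assumes an inverse-cosine firing law, alpha = acos(Uct / Uct_max), and models the inverting amplifier in front of ACR2/VR simply as a sign change of the control voltage; UCT_MAX and the sample values are illustrative, not from the text.

/* Minimal numerical sketch (not from the text) of the alpha = beta
 * coordination between the forward group VF and the reverse group VR.
 * Assumptions: an inverse-cosine firing law alpha = acos(Uct/Uct_max),
 * and the inverting amplifier in front of ACR2/VR modelled as -Uct.     */
#include <stdio.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#define UCT_MAX 10.0   /* control-voltage range of the firing unit (assumed) */

static double firing_angle_deg(double uct)
{
    if (uct >  UCT_MAX) uct =  UCT_MAX;   /* clamp to the valid acos() range */
    if (uct < -UCT_MAX) uct = -UCT_MAX;
    return acos(uct / UCT_MAX) * 180.0 / M_PI;
}

int main(void)
{
    for (double uct = -8.0; uct <= 8.0; uct += 4.0) {
        double alpha_f = firing_angle_deg(uct);   /* forward group VF */
        double alpha_r = firing_angle_deg(-uct);  /* reverse group VR (inverted input) */
        double beta_r  = 180.0 - alpha_r;         /* advance angle of VR */
        printf("Uct = %5.1f  alpha_f = %6.1f deg  beta_r = %6.1f deg\n",
               uct, alpha_f, beta_r);
    }
    return 0;   /* every row prints alpha_f equal to beta_r */
}

    Inverting the reverse-group input makes alpha_r = 180° - alpha_f, hence beta_r = 180° - alpha_r = alpha_f, which is exactly the α = β condition.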

Logic controller (DLC):

1.    Two outputs, Ublf and Ublr:

    The output signals Ublf and Ublr act on the trigger units of the forward and reverse groups respectively and determine whether firing pulses are generated.
The states of Ublf and Ublr must always be opposite, so that the two rectifier groups can never be in the working state at the same time.

2.    Two inputs, Ui* and Ui:
    Ui* and Ui are the key conditions from which the logic controller decides whether to change the state of its output signals.

    A change in the polarity of Ui* is not, on its own, enough for the logic controller to change its output state: it must also wait until the motor current in the original direction has decayed to zero, Ui = 0, before the group that has been working may be shut off and the previously blocked group released. The armature current falling to zero, Ui = 0, is therefore the second switching condition.
    Only after both conditions are satisfied, i.e. Ui* has changed polarity and Ui = 0, may the output state of the logic controller change (a minimal sketch of this check follows below).
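
    A minimal sketch of the two-condition check, assuming the convention that a positive Ui* calls for the forward group; the zero-current threshold and all names are illustrative:

/* Sketch of the switching condition: the DLC output may change only when
 * (1) the torque reference Ui* opposes the group that is currently working
 * and (2) the armature-current feedback Ui has fallen to zero.
 * The sign convention and the threshold are assumptions.                   */
#include <stdbool.h>
#include <math.h>

#define ZERO_CURRENT_LEVEL 0.01   /* "Ui = 0" detection level (assumed) */

typedef enum { FORWARD_GROUP_WORKING, REVERSE_GROUP_WORKING } dlc_state_t;

bool dlc_may_switch(dlc_state_t state, double ui_star, double ui)
{
    bool torque_reversed = (state == FORWARD_GROUP_WORKING) ? (ui_star < 0.0)
                                                            : (ui_star > 0.0);
    bool zero_current = fabs(ui) < ZERO_CURRENT_LEVEL;
    return torque_reversed && zero_current;   /* both conditions must hold */
}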

Composition:
    The logic controller consists of four stages: level detection, logic judgment, delay circuits and interlock protection.
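
    The four stages can be put together in a small state-machine sketch, called once per control cycle. The delay lengths (a blocking delay before the working group's pulses are removed and a further unblocking delay before the other group is released), the signal polarities and all names are illustrative assumptions, not values from the text.

/* Sketch of the DLC assembled from its four stages: level detection,
 * logic judgment, delay, and interlock protection.  Call dlc_step() once
 * per control cycle.  Delay lengths and signal conventions are assumed
 * for illustration (here "true" means the group's pulses are released). */
#include <stdbool.h>
#include <math.h>

#define ZERO_I_LEVEL  0.01   /* level detector threshold for "Ui = 0" (assumed) */
#define T_BLOCK_DELAY 3      /* blocking delay, in control cycles (assumed)     */
#define T_OPEN_DELAY  7      /* unblocking delay, in control cycles (assumed)   */

typedef struct {
    bool ublf;           /* forward group VF released when true */
    bool ublr;           /* reverse group VR released when true */
    int  delay_counter;  /* counts down the changeover delays   */
    bool switching;      /* a group changeover is in progress   */
} dlc_t;

void dlc_step(dlc_t *d, double ui_star, double ui)
{
    /* 1. level detection: turn the analogue inputs into logic levels */
    bool torque_positive = ui_star > 0.0;
    bool zero_current    = fabs(ui) < ZERO_I_LEVEL;

    /* 2. logic judgment: request a changeover only when the reference
     *    opposes the released group AND the armature current is zero   */
    bool request = zero_current &&
                   ((d->ublf && !torque_positive) || (d->ublr && torque_positive));
    if (!d->switching && request) {
        d->switching     = true;
        d->delay_counter = T_BLOCK_DELAY + T_OPEN_DELAY;
    }

    /* 3. delay: after the blocking delay remove the working group's pulses,
     *    after the further unblocking delay release the other group         */
    if (d->switching) {
        d->delay_counter--;
        if (d->delay_counter == T_OPEN_DELAY) {   /* blocking delay elapsed */
            d->ublf = false;
            d->ublr = false;                      /* brief interval with both blocked */
        }
        if (d->delay_counter == 0) {              /* unblocking delay elapsed */
            d->switching = false;
            if (torque_positive) d->ublf = true; else d->ublr = true;
        }
    }

    /* 4. interlock protection: the two groups must never be released together */
    if (d->ublf && d->ublr) {
        d->ublf = false;
        d->ublr = false;
    }
}

    A caller would initialise the structure with one group released, e.g. dlc_t d = { .ublf = true, .ublr = false, .delay_counter = 0, .switching = false };, and feed it the current reference and the armature-current feedback every cycle. In this sketch the changeover passes through a short interval in which both groups are blocked, and the interlock at the end guarantees that, whatever happens upstream, the two groups are never released at the same time.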


