我们还是从SoftVerbalizer的初始化方法看起:
def __init__(self,
tokenizer: Optional[PreTrainedTokenizer],
model: Optional[PreTrainedModel],
classes: Optional[List] = None,
num_classes: Optional[Sequence[int]] = None,
label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,
prefix: Optional[str] = " ",
multi_token_handler: Optional[str] = "first",
):
super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)
self.prefix = prefix
self.multi_token_handler = multi_token_handler#对单个word或字拆分成多个部分的处理策略
head_name = [n for n,c in model.named_children()][-1]
logger.info(f"The LM head named {head_name} was retrieved.")
self.head = copy.deepcopy(getattr(model, head_name))
max_loop = 5#下面的最要工作是找到掩码语言模型的Mask层的输出,最高迭代五次,否则表示这个模型没有mask层
if not

（注：原文在此处被付费墙截断，`if not` 之后的代码及后续讲解未能获取。）
