Faster R-CNN Source Code Walkthrough (Part 1): anchor_target_layer.py

This post walks through how the AnchorTargetLayer in Faster R-CNN works, covering the key steps of generating anchors, assigning positive and negative samples, and setting their weights.


The AnchorTargetLayer

Purpose:

Generate all anchors, assign each anchor a label from the ground-truth (GT) boxes, and compute the regression offsets between each anchor and the GT box with which it has the highest IoU.
My take: this layer essentially produces the learning target for every anchor.

Inputs:

bottom: 'rpn_cls_score'  # used only to obtain the height and width of the current feature map
bottom: 'gt_boxes'       # ground-truth boxes, one row per object: (x1, y1, x2, y2, class label)
bottom: 'im_info'        # image height, width, and the resize scale
bottom: 'data'           # the input image itself
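
For orientation, here is a minimal sketch of what these bottoms typically contain in py-faster-rcnn; all sizes below are made up for illustration, and the real shapes depend on the image and the RPN head:

import numpy as np

A = 9                                        # anchors per feature-map location
H, W = 39, 64                                # conv feature-map size (example values)

rpn_cls_score = np.zeros((1, 2 * A, H, W))   # only its H and W are read by this layer
gt_boxes = np.array([[ 48.,  32., 320., 240., 1.],   # (x1, y1, x2, y2, class)
                     [400., 100., 580., 300., 3.]])
im_info = np.array([[624., 1024., 1.6]])     # (image height, image width, resize scale)
data = np.zeros((1, 3, 624, 1024))           # the preprocessed image blob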

Outputs:

top: 'rpn_labels'                shape [1, 1, A*height, width], where A is the number of anchors per location
top: 'rpn_bbox_targets'          shape [1, A*4, height, width]: regression offsets from each anchor to its highest-overlap GT box
top: 'rpn_bbox_inside_weights'   shape [1, A*4, height, width]: 1 for the sampled positives, 0 otherwise; box regression is only applied to foreground anchors
top: 'rpn_bbox_outside_weights'  shape [1, A*4, height, width]: outside weights; by default negatives and positives both get 1/Nreg
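
To make the [1, 1, A*height, width] layout of rpn_labels concrete, here is a small numpy check (a sketch, not the layer's code) of how a per-location, per-anchor label array folds into that blob, mirroring what forward() does at the end:

import numpy as np

H, W, A = 2, 3, 9                            # toy feature-map size and anchor count
labels = np.arange(H * W * A)                # pretend labels, one per (h, w, anchor)

# store labels per location first (1, H, W, A), move the anchor axis forward,
# then fold it into the height axis
blob = labels.reshape((1, H, W, A)).transpose(0, 3, 1, 2).reshape((1, 1, A * H, W))
assert blob.shape == (1, 1, A * H, W)

# the label of anchor a at cell (h, w) sits at row a*H + h of the folded blob
a, h, w = 4, 1, 2
assert blob[0, 0, a * H + h, w] == labels.reshape(H, W, A)[h, w, a]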

Workflow:

(1) From the feature-map size and _feat_stride, build all_anchors with K*A rows. Here feat_stride = 16, so each rpn_cls_score cell maps back to a coordinate on the original image, and K = height * width.
(2) Discard the anchors that are not fully inside the image, keeping anchors.
(3) Compute the overlaps between anchors and gt_boxes to decide which of the K*A anchors are positive and which are negative (a toy numpy sketch of steps (1)-(3) follows this list).
(4) Subsample the labels: up to 128 foreground anchors are drawn; if fewer positives are available, extra negatives are drawn so the total stays at 256. Positives get label 1, negatives 0, and unused anchors -1.
(5) Compute rpn_bbox_targets, rpn_bbox_inside_weights, and rpn_bbox_outside_weights.
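
The following toy numpy sketch walks through steps (1)-(3) with a single base anchor on a 2x3 feature map. Variable names mirror the layer, but the code is illustrative only; 0.3 and 0.7 are the default RPN_NEGATIVE_OVERLAP and RPN_POSITIVE_OVERLAP thresholds from config.py.

import numpy as np

feat_stride = 16
height, width = 2, 3
anchors = np.array([[-8., -8., 8., 8.]])           # one 16x16 base anchor, so A = 1
A = anchors.shape[0]

# (1) shift the base anchor to every feature-map cell
shift_x, shift_y = np.meshgrid(np.arange(width) * feat_stride,
                               np.arange(height) * feat_stride)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()    # (K, 4), K = 6
all_anchors = (anchors.reshape((1, A, 4)) +
               shifts.reshape((1, -1, 4)).transpose((1, 0, 2))).reshape((-1, 4))

# (2) keep only anchors that fit inside a toy 48x32 "image"
im_h, im_w = 32, 48
inside = np.where((all_anchors[:, 0] >= 0) & (all_anchors[:, 1] >= 0) &
                  (all_anchors[:, 2] < im_w) & (all_anchors[:, 3] < im_h))[0]

# (3) IoU against a single GT box, then threshold into fg / bg
def iou(boxes, gt):
    ix1, iy1 = np.maximum(boxes[:, 0], gt[0]), np.maximum(boxes[:, 1], gt[1])
    ix2, iy2 = np.minimum(boxes[:, 2], gt[2]), np.minimum(boxes[:, 3], gt[3])
    inter = np.maximum(ix2 - ix1 + 1, 0) * np.maximum(iy2 - iy1 + 1, 0)
    area_b = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    area_g = (gt[2] - gt[0] + 1) * (gt[3] - gt[1] + 1)
    return inter / (area_b + area_g - inter)

gt = np.array([14., 6., 30., 22.])
overlaps = iou(all_anchors[inside], gt)
labels = -np.ones(len(inside))                     # -1 = don't care
labels[overlaps < 0.3] = 0                         # background
labels[overlaps >= 0.7] = 1                        # foreground
labels[overlaps.argmax()] = 1                      # best anchor for the GT is always fg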

Source code:

# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------

import os
import caffe
import yaml
from fast_rcnn.config import cfg
import numpy as np
import numpy.random as npr
from generate_anchors import generate_anchors
from utils.cython_bbox import bbox_overlaps
from fast_rcnn.bbox_transform import bbox_transform

DEBUG = False

class AnchorTargetLayer(caffe.Layer):
    """
    Assign anchors to ground-truth targets. Produces anchor classification
    labels and bounding-box regression targets.
    """

    def setup(self, bottom, top):
        layer_params = yaml.load(self.param_str_)
        anchor_scales = layer_params.get('scales', (8, 16, 32))
        self._anchors = generate_anchors(scales=np.array(anchor_scales))  # anchor generation is covered in a separate post
        self._num_anchors = self._anchors.shape[0]  # 9 by default
        self._feat_stride = layer_params['feat_stride']  # matches base_size = 16 in generate_anchors

        if DEBUG:
            print 'anchors:'
            print self._anchors
            print 'anchor shapes:'  # print each anchor's (width, height)
            print np.hstack((
                self._anchors[:, 2::4] - self._anchors[:, 0::4],
                self._anchors[:, 3::4] - self._anchors[:, 1::4],
            ))
            self._counts = cfg.EPS  # cfg.EPS is a small epsilon defined in config.py
            self._sums = np.zeros((1, 4))
            self._squared_sums = np.zeros((1, 4))
            self._fg_sum = 0
            self._bg_sum = 0
            self._count = 0

        # allow boxes to sit over the edge by a small amount
        self._allowed_border = layer_params.get('allowed_border', 0)

        height, width = bottom[0].data.shape[-2:]
        if DEBUG:
            print 'AnchorTargetLayer: height', height, 'width', width

        A = self._num_anchors  # usually 9
        # declare the outputs:
        # reshape each top blob to its expected layout
        # labels
        top[0].reshape(1, 1, A * height, width)
        # bbox_targets
        top[1].reshape(1, A * 4, height, width)
        # bbox_inside_weights
        top[2].reshape(1, A * 4, height, width)
        # bbox_outside_weights
        top[3].reshape(1, A * 4, height, width)

    def forward(self, bottom, top):
        # Algorithm:
        #
        # for each (H, W) location i
        #   generate 9 anchor boxes centered on cell i
        #   apply predicted bbox deltas at cell i to each of the 9 anchors
        # filter out-of-image anchors
        # measure GT overlap

        assert bottom[0].data.shape[0] == 1, \
            'Only single item batches are supported'

        # feature map of shape (..., H, W); only the spatial size is used
        height, width = bottom[0].data.shape[-2:]
        # GT boxes (x1, y1, x2, y2, label)
        gt_boxes = bottom[1].data
        # im_info
        im_info = bottom[2].data[0, :]

        if DEBUG:
            print ''
            print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
            print 'scale: {}'.format(im_info[2])
            print 'height, width: ({}, {})'.format(height, width)
            print 'rpn: gt_boxes.shape', gt_boxes.shape
            print 'rpn: gt_boxes', gt_boxes

        # 1. Generate proposals from bbox deltas and shifted anchors
        shift_x = np.arange(0, width) * self._feat_stride  # shift amounts along x
        shift_y = np.arange(0, height) * self._feat_stride  # shift amounts along y
        # with _feat_stride = 16:
        # shift_x = (0, 16, 32, ..., (width - 1) * 16)
        # shift_y = (0, 16, 32, ..., (height - 1) * 16)

        # after meshgrid, shift_x and shift_y are both height x width arrays;
        # e.g. for a 64 x 39 feature map that is K = 2496 locations
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        # pairing the corresponding elements gives the shift to apply at each location
        # (relative to the 9 base anchors sitting at the top-left corner of the image);
        # adding these K shift vectors to the base anchors yields all K x 9 anchors,
        # which are stored in all_anchors
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel())).transpose()
        # add A anchors (1, A, 4) to
        # cell K shifts (K, 1, 4) to get
        # shift anchors (K, A, 4)
        # reshape to (K*A, 4) shifted anchors
        A = self._num_anchors
        K = shifts.shape[0]
        all_anchors = (self._anchors.reshape((1, A, 4)) +
                       shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
        all_anchors = all_anchors.reshape((K * A, 4))
        total_anchors = int(K * A)

        # only keep anchors inside the image
        inds_inside = np.where(
            (all_anchors[:, 0] >= -self._allowed_border) &
            (all_anchors[:, 1] >= -self._allowed_border) &
            (all_anchors[:, 2] < im_info[1] + self._allowed_border) &  # width
            (all_anchors[:, 3] < im_info[0] + self._allowed_border)    # height
        )[0]

        if DEBUG:
            print 'total_anchors', total_anchors
            print 'inds_inside', len(inds_inside)

        # keep only inside anchors
        # i.e. keep only the anchors that lie entirely within the image boundary
        anchors = all_anchors[inds_inside, :]
        if DEBUG:
            print 'anchors.shape', anchors.shape

        # label: 1 is positive, 0 is negative, -1 is don't care
        # allocate one label per kept anchor, initialized to -1
        labels = np.empty((len(inds_inside), ), dtype=np.float32)
        labels.fill(-1)

        # overlaps between the anchors and the gt boxes
        # overlaps (ex, gt)
        # overlaps is a len(anchors) x len(gt_boxes) array; entry (i, j) is the
        # overlap (IoU) between anchor i and gt box j:
        # overlap = intersection_area / (anchor_area + gt_area - intersection_area)
        overlaps = bbox_overlaps(
            np.ascontiguousarray(anchors, dtype=np.float),  # contiguous float arrays
            np.ascontiguousarray(gt_boxes, dtype=np.float))

        # argmax_overlaps: for each anchor (row), the index of the gt box with the highest overlap
        # max_overlaps: that highest overlap value, one entry per anchor
        argmax_overlaps = overlaps.argmax(axis=1)
        max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]

        # gt_argmax_overlaps: for each gt box (column), the index of the anchor with the highest overlap
        # gt_max_overlaps: that highest overlap value, one entry per gt box
        gt_argmax_overlaps = overlaps.argmax(axis=0)
        gt_max_overlaps = overlaps[gt_argmax_overlaps,
                                   np.arange(overlaps.shape[1])]

        # several anchors can tie for the highest overlap with a gt box; this finds all of them
        gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
        # next, decide from the overlaps whether each anchor is foreground or background

        # RPN_CLOBBER_POSITIVES = False: bg labels are assigned first, so a bg anchor can later be promoted to fg;
        # RPN_CLOBBER_POSITIVES = True: bg labels are assigned last, so an fg anchor can be demoted to bg.
        if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
            # assign bg labels first so that positive labels can clobber them
            labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0

        # fg label: for each gt, anchor with highest overlap
        # for each gt box, the anchor(s) with the highest overlap are labeled 1
        labels[gt_argmax_overlaps] = 1

        # fg label: above threshold IOU
        # any anchor whose max overlap reaches the positive threshold is labeled 1
        labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1

        if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
            # assign bg labels last so that negative labels can clobber positives
            labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0

        # subsample positive labels if we have too many
        # next, cap the numbers of positive and negative samples
        # RPN_FG_FRACTION: fraction of the RPN batch that should be fg; RPN_BATCHSIZE: total RPN samples per image
        num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)  # number of fg samples wanted for training
        fg_inds = np.where(labels == 1)[0]  # indices of all anchors currently labeled fg
        # if there are too many positives, randomly disable the surplus (set their labels to -1)
        if len(fg_inds) > num_fg:
            disable_inds = npr.choice(
                fg_inds, size=(len(fg_inds) - num_fg), replace=False)
            labels[disable_inds] = -1

        # subsample negative labels if we have too many
        num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)  # number of bg samples needed to fill the batch
        bg_inds = np.where(labels == 0)[0]  # indices of all anchors currently labeled bg
        # if there are too many negatives, randomly disable the surplus (set their labels to -1)
        if len(bg_inds) > num_bg:
            disable_inds = npr.choice(
                bg_inds, size=(len(bg_inds) - num_bg), replace=False)
            labels[disable_inds] = -1
            #print "was %s inds, disabling %s, now %s inds" % (
                #len(bg_inds), len(disable_inds), np.sum(labels == 0))
        # compute the regression offsets from each anchor to its highest-overlap ground truth;
        # the parameterization is the one given in the paper and implemented by bbox_transform
        # in fast_rcnn/bbox_transform.py (a sketch is given after this listing)
        bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
        bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])
        # bbox_inside_weights restrict the box regression to foreground anchors:
        # sampled positives get (1, 1, 1, 1) and everything else gets (0, 0, 0, 0)
        bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
        bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)
        # bbox_outside_weights weight the fg and bg terms of the loss; here both get the
        # same weight, 1 / (number of anchors actually used in the batch)
        bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
        if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:  # give positives and negatives the same weight
            # uniform weighting of examples (given non-uniform sampling)
            num_examples = np.sum(labels >= 0)  # number of sampled anchors (fg + bg), normally RPN_BATCHSIZE = 256
            positive_weights = np.ones((1, 4)) * 1.0 / num_examples
            negative_weights = np.ones((1, 4)) * 1.0 / num_examples
        else:
            assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &  # must lie strictly between 0 and 1, otherwise this assert fires
                    (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
            positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /
                                np.sum(labels == 1))
            negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /
                                np.sum(labels == 0))
        bbox_outside_weights[labels == 1, :] = positive_weights
        bbox_outside_weights[labels == 0, :] = negative_weights

        if DEBUG:
            # track the mean and std of the foreground regression targets
            self._sums += bbox_targets[labels == 1, :].sum(axis=0)
            self._squared_sums += (bbox_targets[labels == 1, :] ** 2).sum(axis=0)
            self._counts += np.sum(labels == 1)
            means = self._sums / self._counts
            stds = np.sqrt(self._squared_sums / self._counts - means ** 2)
            print 'means:'
            print means
            print 'stdevs:'
            print stds

        # map up to original set of anchors
        # recall that roughly two thirds of all_anchors were discarded above (only anchors
        # inside the image were kept); here everything is mapped back to the full anchor set
        labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
        bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
        bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
        bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)

        if DEBUG:
            print 'rpn: max max_overlap', np.max(max_overlaps)
            print 'rpn: num_positive', np.sum(labels == 1)
            print 'rpn: num_negative', np.sum(labels == 0)
            self._fg_sum += np.sum(labels == 1)
            self._bg_sum += np.sum(labels == 0)
            self._count += 1
            print 'rpn: num_positive avg', self._fg_sum / self._count
            print 'rpn: num_negative avg', self._bg_sum / self._count
        # reshape the outputs into the blob layouts expected by the top blobs
        # labels
        labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)
        labels = labels.reshape((1, 1, A * height, width))
        top[0].reshape(*labels.shape)
        top[0].data[...] = labels

        # bbox_targets
        bbox_targets = bbox_targets \
            .reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
        top[1].reshape(*bbox_targets.shape)
        top[1].data[...] = bbox_targets

        # bbox_inside_weights
        bbox_inside_weights = bbox_inside_weights \
            .reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
        assert bbox_inside_weights.shape[2] == height
        assert bbox_inside_weights.shape[3] == width
        top[2].reshape(*bbox_inside_weights.shape)
        top[2].data[...] = bbox_inside_weights

        # bbox_outside_weights
        bbox_outside_weights = bbox_outside_weights \
            .reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
        assert bbox_outside_weights.shape[2] == height
        assert bbox_outside_weights.shape[3] == width
        top[3].reshape(*bbox_outside_weights.shape)
        top[3].data[...] = bbox_outside_weights

    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass

    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass

# _unmap handles two kinds of input: the 1-D labels and the 2-D bbox_targets,
# bbox_inside_weights and bbox_outside_weights:
# labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
# bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
# bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
# bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)
def _unmap(data, count, inds, fill=0):
    """ Unmap a subset of item (data) back to the original set of items (of
    size count) """
    if len(data.shape) == 1:
        ret = np.empty((count, ), dtype=np.float32)
        ret.fill(fill)
        ret[inds] = data
    else:
        ret = np.empty((count, ) + data.shape[1:], dtype=np.float32)
        ret.fill(fill)
        ret[inds, :] = data
    return ret


def _compute_targets(ex_rois, gt_rois):
    """Compute bounding-box regression targets for an image."""

    assert ex_rois.shape[0] == gt_rois.shape[0]
    assert ex_rois.shape[1] == 4
    assert gt_rois.shape[1] == 5

    return bbox_transform(ex_rois, gt_rois[:, :4]).astype(np.float32, copy=False)  # bbox_transform is covered separately; see the sketch below
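
bbox_transform itself is not part of this file. Roughly, it computes the standard Faster R-CNN regression parameterization; here is a minimal sketch, assuming the usual +1 width/height convention used throughout py-faster-rcnn:

import numpy as np

def bbox_transform_sketch(ex_rois, gt_rois):
    """Targets (tx, ty, tw, th) that would map each anchor onto its matched GT box."""
    ex_w = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_h = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_cx = ex_rois[:, 0] + 0.5 * ex_w
    ex_cy = ex_rois[:, 1] + 0.5 * ex_h

    gt_w = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_h = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_cx = gt_rois[:, 0] + 0.5 * gt_w
    gt_cy = gt_rois[:, 1] + 0.5 * gt_h

    tx = (gt_cx - ex_cx) / ex_w        # center shift, measured in anchor widths
    ty = (gt_cy - ex_cy) / ex_h        # center shift, measured in anchor heights
    tw = np.log(gt_w / ex_w)           # log scale change in width
    th = np.log(gt_h / ex_h)           # log scale change in height
    return np.vstack((tx, ty, tw, th)).transpose()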

