Priority queues for libevent events

When multiple events become active at the same time, libevent does not define the order in which their callbacks run. Priorities can be used to mark some events as more important than others.

libevent's priority queues are simply the array of active queues: when epoll reports an event, the event is appended to the active queue indexed by its priority. That array is the whole priority mechanism.
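Before walking through the implementation, a minimal usage sketch may help; it sets up three priorities on a base and pins one event to the highest priority. The socket fd, read_cb, and the choice of three levels are assumptions for illustration, and error checking is omitted.

#include <event2/event.h>

static void read_cb(evutil_socket_t fd, short what, void *arg)
{
    /* handle the readable descriptor here */
}

int main(void)
{
    struct event_base *base = event_base_new();

    /* Three priority levels: 0 (highest) .. 2 (lowest). Call this before
     * any event becomes active. */
    event_base_priority_init(base, 3);

    evutil_socket_t fd = -1;   /* stands in for a real, non-blocking socket */
    struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, read_cb, NULL);

    /* Without this call the event would keep the default priority 3/2 == 1. */
    event_priority_set(ev, 0);

    event_add(ev, NULL);
    event_base_dispatch(base);

    event_free(ev);
    event_base_free(base);
    return 0;
}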

1. The event_base priority queues
In libevent, each event_base has one or more priorities associated with it. Call event_base_priority_init to set the number of priorities an event_base supports.
/* npriorities is the number of tail queues to create;
 * base->nactivequeues holds the number of queues,
 * base->activequeues is the base address of the queue array. */
int
event_base_priority_init(struct event_base *base, int npriorities)
{
    int i;

    if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
        || npriorities >= EVENT_MAX_PRIORITIES)
        return (-1);

    if (npriorities == base->nactivequeues)
        return (0);
    /* The priority queues are just the array of active queues: when epoll
     * reports an event, the event is appended to the active queue indexed
     * by its priority. activequeues is an array of tail-queue heads, one
     * per priority level, and nactivequeues is the number of queues. */
    /* A single mm_free is enough here because the queue heads were
     * allocated as one contiguous array with mm_calloc below. */
    if (base->nactivequeues) {
        mm_free(base->activequeues);
        base->nactivequeues = 0;
    }

    /* Allocate our npriorities priority queues */
    base->activequeues = (struct event_list *)
      mm_calloc(npriorities, sizeof(struct event_list));
    if (base->activequeues == NULL) {
        event_warn("%s: calloc", __func__);
        return (-1);
    }
    base->nactivequeues = npriorities;
    /* Initialize each queue head */
    for (i = 0; i < base->nactivequeues; ++i) {
        TAILQ_INIT(&base->activequeues[i]);
    }

    return (0);
}
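To make the data layout concrete, here is a small standalone sketch (not libevent code) of the same idea: an array of tail-queue heads indexed by priority, built with the TAILQ macros from <sys/queue.h>. The item type and the priority count are invented for illustration.

#include <stdio.h>
#include <sys/queue.h>

struct item {
    int priority;
    TAILQ_ENTRY(item) next;             /* links inside one queue */
};
TAILQ_HEAD(item_list, item);

#define NPRIORITIES 3

int main(void)
{
    struct item_list queues[NPRIORITIES];   /* one tail queue per priority */
    struct item a = { 2 }, b = { 0 }, c = { 1 };
    struct item *it;
    int i;

    for (i = 0; i < NPRIORITIES; i++)
        TAILQ_INIT(&queues[i]);

    /* "Activate" the items: append each to the queue of its priority. */
    TAILQ_INSERT_TAIL(&queues[a.priority], &a, next);
    TAILQ_INSERT_TAIL(&queues[b.priority], &b, next);
    TAILQ_INSERT_TAIL(&queues[c.priority], &c, next);

    /* Drain in priority order, just as event_process_active does. */
    for (i = 0; i < NPRIORITIES; i++)
        TAILQ_FOREACH(it, &queues[i], next)
            printf("processing item with priority %d\n", it->priority);
    return 0;
}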
2. Event priorities
    An event's priority defaults to base->nactivequeues / 2, assigned when the event is created:

    int
    event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd,
        short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
    {
        if (!base)
            base = current_base;

        _event_debug_assert_not_added(ev);

        ev->ev_base = base;
        .........

        if (base != NULL) {
            /* by default, we put new events into the middle priority */
            ev->ev_pri = base->nactivequeues / 2;
        }

        _event_debug_note_setup(ev);

        return 0;
    }

    The priority can also be set explicitly by calling the event_priority_set API.

/*
 * Set's the priority of an event - if an event is already scheduled
 * changing the priority is going to fail.
 */
int
event_priority_set(struct event *ev, int pri)
{
    _event_debug_assert_is_setup(ev);

    if (ev->ev_flags & EVLIST_ACTIVE)
        return (-1);
    if (pri < 0 || pri >= ev->ev_base->nactivequeues)
        return (-1);

    ev->ev_pri = pri;

    return (0);
}
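A short hedged sketch of the failure cases: event_priority_set refuses to change an event that is already on an active queue and rejects out-of-range values, so the return value is worth checking (ev and base as in the earlier sketch):

    /* Valid priorities are 0 .. base->nactivequeues - 1; 0 is the highest. */
    if (event_priority_set(ev, 0) == -1) {
        /* Either ev is currently active (EVLIST_ACTIVE is set) or the
         * requested priority is out of range for this base. */
    }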
3. Adding activated events to the priority queues
    epoll_dispatch passes each event reported by epoll_wait to evmap_io_active, which adds the corresponding events to the active (priority) queues.
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
    struct epollop *epollop = base->evbase;
    struct epoll_event *events = epollop->events;
    int i, res;
    long timeout = -1;

    if (tv != NULL) {
        timeout = evutil_tv_to_msec(tv);
        if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
            /* Linux kernels can wait forever if the timeout is
             * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
            timeout = MAX_EPOLL_TIMEOUT_MSEC;
        }
    }

    epoll_apply_changes(base);
    event_changelist_remove_all(&base->changelist, base);

    EVBASE_RELEASE_LOCK(base, th_base_lock);

    res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("epoll_wait");
            return (-1);
        }

        return (0);
    }

    event_debug(("%s: epoll_wait reports %d", __func__, res));
    EVUTIL_ASSERT(res <= epollop->nevents);

    for (i = 0; i < res; i++) {
        int what = events[i].events;
        short ev = 0;

        if (what & (EPOLLHUP|EPOLLERR)) {
            ev = EV_READ | EV_WRITE;
        } else {
            if (what & EPOLLIN)                
                ev |= EV_READ;
            if (what & EPOLLOUT)
                ev |= EV_WRITE;
        }

        if (!ev)
            continue;

        evmap_io_active(base, events[i].data.fd, ev | EV_ET);
    }

    .......

    return (0);
}

evmap_io_active uses the fd to index the I/O map (a hash table or array), finds the events registered on that fd, and calls event_active_nolock to put each matching event on an active queue:

void
evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
{
    struct event_io_map *io = &base->io;
    struct evmap_io *ctx;
    struct event *ev;

#ifndef EVMAP_USE_HT
    EVUTIL_ASSERT(fd < io->nentries);
#endif
    GET_IO_SLOT(ctx, io, fd, evmap_io);

    EVUTIL_ASSERT(ctx);
    TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
        if (ev->ev_events & events)
            event_active_nolock(ev, ev->ev_events & events, 1);
    }
} 

event_active_nolock then calls event_queue_insert(base, ev, EVLIST_ACTIVE):

void
event_active_nolock(struct event *ev, int res, short ncalls)
{
    struct event_base *base;

    event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
        ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));


    /* We get different kinds of events, add them together */
    if (ev->ev_flags & EVLIST_ACTIVE) {
        ev->ev_res |= res;
        return;
    }

    base = ev->ev_base;

    EVENT_BASE_ASSERT_LOCKED(base);

    ev->ev_res = res;

    if (ev->ev_pri < base->event_running_priority)
        base->event_continue = 1;

    if (ev->ev_events & EV_SIGNAL) {
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
        if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
            ++base->current_event_waiters;
            EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
        }
#endif
        ev->ev_ncalls = ncalls;
        ev->ev_pncalls = NULL;
    }

    event_queue_insert(base, ev, EVLIST_ACTIVE);

    if (EVBASE_NEED_NOTIFY(base))
        evthread_notify_base(base);
}
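As an aside, application code can also activate an event by hand through the public event_active wrapper, which takes the base lock and calls event_active_nolock internally; a one-line sketch using the ev from the earlier example:

    /* Mark ev active with an EV_READ result; the ncalls argument (here 1)
     * only matters for signal events. */
    event_active(ev, EV_READ, 1);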

event_queue_insert reads the event's priority and appends the event to the tail of the corresponding active queue:

static void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
    EVENT_BASE_ASSERT_LOCKED(base);

    if (ev->ev_flags & queue) {
        /* Double insertion is possible for active events */
        if (queue & EVLIST_ACTIVE)
            return;

        event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on queue %x", __func__,
            ev, EV_SOCK_ARG(ev->ev_fd), queue);
        return;
    }

    if (~ev->ev_flags & EVLIST_INTERNAL)
        base->event_count++;

    ev->ev_flags |= queue;
    switch (queue) {
    ........
    case EVLIST_ACTIVE:
        base->event_count_active++;
        TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
            ev,ev_active_next);
        break;
    .......
    default:
        event_errx(1, "%s: unknown queue %x", __func__, queue);
    }
}
4. Finally, event_process_active processes the events in the active queues in priority order
static int
event_process_active(struct event_base *base)
{
    /* Caller must hold th_base_lock */
    struct event_list *activeq = NULL;
    int i, c = 0;
    /* Loop over the active queues in index order, so higher-priority (lower index) queues run first */
    for (i = 0; i < base->nactivequeues; ++i) {
        if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
            base->event_running_priority = i;
            activeq = &base->activequeues[i];
            c = event_process_active_single_queue(base, activeq);
            if (c < 0) {
                base->event_running_priority = -1;
                return -1;
            } else if (c > 0)
                break; /* Processed a real event; do not
                    * consider lower-priority events */
            /* If we get here, all of the events we processed
             * were internal.  Continue. */
        }
    }

    event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
    base->event_running_priority = -1;
    return c;
}

Note that event_process_active only services the first non-empty queue that yields real (non-internal) events per pass, so continuously active high-priority events can starve lower-priority ones. The callbacks themselves are finally invoked by event_process_active_single_queue:

static int
event_process_active_single_queue(struct event_base *base,
    struct event_list *activeq)
{
    struct event *ev;
    int count = 0;

    EVUTIL_ASSERT(activeq != NULL);

    for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
        /* If the event is EV_PERSIST, only remove it from the active queue;
         * otherwise call event_del_internal, which removes it from the active
         * queue and also deletes it from evmap_io or evmap_signal. */
        if (ev->ev_events & EV_PERSIST)
            event_queue_remove(base, ev, EVLIST_ACTIVE);
        else
            event_del_internal(ev);
        if (!(ev->ev_flags & EVLIST_INTERNAL))
            ++count;

        event_debug((
             "event_process_active: event: %p, %s%scall %p",
            ev,
            ev->ev_res & EV_READ ? "EV_READ " : " ",
            ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
            ev->ev_callback));

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
        base->current_event = ev;
        base->current_event_waiters = 0;
#endif

        switch (ev->ev_closure) {
        case EV_CLOSURE_SIGNAL:
            event_signal_closure(base, ev);
            break;
        case EV_CLOSURE_PERSIST:
            event_persist_closure(base, ev);
            break;
        default:
        case EV_CLOSURE_NONE:
            EVBASE_RELEASE_LOCK(base, th_base_lock);
            /* Invoke the event's callback */
            (*ev->ev_callback)(
                ev->ev_fd, ev->ev_res, ev->ev_arg);
            break;
        }

        EVBASE_ACQUIRE_LOCK(base, th_base_lock);
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
        base->current_event = NULL;
        if (base->current_event_waiters) {
            base->current_event_waiters = 0;
            EVTHREAD_COND_BROADCAST(base->current_event_cond);
        }
#endif

        if (base->event_break)
            return -1;
        if (base->event_continue)
            break;
    }
    return count;
}
