uio_interrupt / user space interrupt

UIO设备与UIO框架交互详解
本文深入探讨了UIO设备与UIO框架之间的交互机制,详细介绍了UIO设备的能力结构,包括设备信息、内存映射、中断处理等功能,并通过结构体定义展示了如何在特定UIO内核驱动中实现这些功能。
TO BE DONE

//drivers/uio
//###################################
//       uio framework
//-----------------------------------
//  uio based specific driver
//###################################

//==================================================================================
// A UIO-based device-specific driver only needs to implement the uio_info
// infrastructure: struct uio_info is the interface that the UIO framework
// exposes to the specific UIO-based kernel driver.
//===================================================================================

//[uio_device]+---->[uio_info]

/*
 * struct uio_device - in-kernel state for one registered UIO device
 * (one per /dev/uioX char device; allocated by the UIO core, not by the
 * device-specific driver)
 * @owner:       module that registered the device (holds a reference)
 * @dev:         the underlying struct device in the driver model
 * @minor:       minor number of the /dev/uioX node
 * @event:       event counter; presumably bumped on each interrupt so
 *               user space can detect missed events - see uio-howto
 * @async_queue: fasync helpers for SIGIO delivery to user space
 * @wait:        wait queue for blocking read()/poll() on /dev/uioX
 * @vma_count:   number of live user-space mmap()ings of this device
 * @info:        the capability description supplied by the specific driver
 * @map_dir:     sysfs kobject under which memory maps are exported
 * @portio_dir:  sysfs kobject under which port regions are exported
 */
struct uio_device {
        struct module           *owner;
        struct device           *dev;
        int                     minor;
        atomic_t                event;
        struct fasync_struct    *async_queue;
        wait_queue_head_t       wait;
        int                     vma_count;
        struct uio_info         *info;
        struct kobject          *map_dir;
        struct kobject          *portio_dir;
};

/**
 * struct uio_info - UIO device capabilities
 * @uio_dev:            the UIO device this info belongs to
 * @name:               device name
 * @version:            device driver version
 * @mem:                list of mappable memory regions, size==0 for end of list
 * @port:               list of port regions, size==0 for end of list
 * @irq:                interrupt number or UIO_IRQ_CUSTOM
 * @irq_flags:          flags for request_irq()
 * @priv:               optional private data
 * @handler:            the device's irq handler
 * @mmap:               mmap operation for this uio device
 * @open:               open operation for this uio device
 * @release:            release operation for this uio device
 * @irqcontrol:         disable/enable irqs when 0/1 is written to /dev/uioX
 */
struct uio_info {
        struct uio_device       *uio_dev;       /* filled in by the UIO core */
        const char              *name;
        const char              *version;
        struct uio_mem          mem[MAX_UIO_MAPS];              /* size==0 terminates */
        struct uio_port         port[MAX_UIO_PORT_REGIONS];     /* size==0 terminates */
        long                    irq;            /* UIO_IRQ_CUSTOM: driver handles irq itself */
        unsigned long           irq_flags;
        void                    *priv;
        irqreturn_t (*handler)(int irq, struct uio_info *dev_info);
        int (*mmap)(struct uio_info *info, struct vm_area_struct *vma);
        int (*open)(struct uio_info *info, struct inode *inode);
        int (*release)(struct uio_info *info, struct inode *inode);
        int (*irqcontrol)(struct uio_info *info, s32 irq_on);
};


[ref] uio-howto.pdf
/* * Vhost-user RDMA device : init and packets forwarding * * Copyright (C) 2025 KylinSoft Inc. and/or its affiliates. All rights reserved. * * Author: Xiong Weimin <xiongweimin@kylinos.cn> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <unistd.h> #include <sys/uio.h> #include <stdlib.h> #include <rte_ethdev.h> #include <rte_spinlock.h> #include <rte_malloc.h> #include "vhost_rdma.h" #include "vhost_rdma_ib.h" #include "vhost_rdma_log.h" #include "vhost_rdma_pkt.h" void free_rd_atomic_resource(__rte_unused struct vhost_rdma_qp *qp, struct vhost_rdma_resp_res *res) { if (res->type == VHOST_ATOMIC_MASK) { rte_pktmbuf_free(res->atomic.mbuf); } else if (res->type == VHOST_READ_MASK) { if (res->read.mr) vhost_rdma_drop_ref(res->read.mr, qp->dev, mr); } res->type = 0; } void free_rd_atomic_resources(struct vhost_rdma_qp *qp) { if (qp->resp.resources) { for (int i = 0; i < qp->attr.max_dest_rd_atomic; i++) { struct vhost_rdma_resp_res *res = &qp->resp.resources[i]; free_rd_atomic_resource(qp, res); } rte_free(qp->resp.resources); qp->resp.resources = NULL; } } void vhost_rdma_queue_cleanup(struct vhost_rdma_qp *qp, struct vhost_rdma_queue* queue) { rte_intr_callback_unregister(&queue->intr_handle, queue->cb, qp); rte_free(queue->data); } void vhost_rdma_mr_cleanup(void* arg) { struct vhost_rdma_mr *mr = arg; mr->type = VHOST_MR_TYPE_NONE; } void vhost_rdma_qp_cleanup(void* arg) { struct vhost_rdma_qp *qp = arg; if (qp->scq) vhost_rdma_drop_ref(qp->scq, qp->dev, cq); if (qp->rcq) vhost_rdma_drop_ref(qp->rcq, qp->dev, cq); if (qp->pd) vhost_rdma_drop_ref(qp->pd, qp->dev, pd); if (qp->resp.mr) { vhost_rdma_drop_ref(qp->resp.mr, qp->dev, mr); qp->resp.mr = NULL; } free_rd_atomic_resources(qp); } void vhost_rdma_init_ib(struct vhost_rdma_device *dev) { uint32_t qpn; 
dev->attr.max_qps = 64; dev->attr.max_cqs = 64; dev->attr.max_mr_size = -1ull; dev->attr.page_size_cap = 0xfffff000; dev->attr.max_qp_wr = 1024; dev->attr.device_cap_flags = VIRTIO_IB_DEVICE_RC_RNR_NAK_GEN; dev->attr.max_send_sge = 32; dev->attr.max_recv_sge = 32; dev->attr.max_sge_rd = 32; dev->attr.max_cqe = 1024; dev->attr.max_mr = 0x00001000; dev->attr.max_mw = 0; dev->attr.max_pd = 0x7ffc; dev->attr.max_qp_rd_atom = 128; dev->attr.max_qp_init_rd_atom = 128; dev->attr.max_ah = 100; dev->attr.max_fast_reg_page_list_len = 512; dev->attr.local_ca_ack_delay = 15; /* rdma device config */ dev->rdma_config.phys_port_cnt = 1; dev->rdma_config.device_cap_flag = dev->attr.device_cap_flags; dev->rdma_config.hw_ver = 1; dev->rdma_config.local_ca_ack_delay = dev->attr.local_ca_ack_delay; dev->rdma_config.max_ah = dev->attr.max_ah; dev->rdma_config.max_cq = dev->attr.max_cqs; dev->rdma_config.max_cqe = dev->attr.max_cqe; dev->rdma_config.max_fast_reg_page_list_len = dev->attr.max_fast_reg_page_list_len; dev->rdma_config.max_mcast_grp = 8192UL; dev->rdma_config.max_mcast_qp_attach = 56UL; dev->rdma_config.max_mr = dev->attr.max_mr; dev->rdma_config.max_mr_size = dev->attr.max_mr_size; dev->rdma_config.max_mw = dev->attr.max_mw; dev->rdma_config.max_pd = dev->attr.max_pd; dev->rdma_config.max_pi_fast_reg_page_list_len = dev->attr.max_fast_reg_page_list_len / 2; dev->rdma_config.max_pkeys = 1; dev->rdma_config.max_qp = dev->attr.max_qps; dev->rdma_config.max_qp_init_rd_atom = dev->attr.max_qp_init_rd_atom; dev->rdma_config.max_qp_rd_atom = dev->attr.max_qp_rd_atom; dev->rdma_config.max_qp_wr = dev->attr.max_qp_wr; dev->rdma_config.max_recv_sge = dev->attr.max_recv_sge; dev->rdma_config.max_res_rd_atom = dev->rdma_config.max_qp_rd_atom * dev->rdma_config.max_qp; dev->rdma_config.max_send_sge = dev->attr.max_send_sge; dev->rdma_config.max_sge_rd = dev->attr.max_sge_rd; dev->rdma_config.max_total_mcast_qp_attach = dev->rdma_config.max_mcast_grp * 
dev->rdma_config.max_mcast_qp_attach; dev->rdma_config.page_size_cap = dev->attr.page_size_cap; dev->rdma_config.phys_port_cnt = 1; dev->rdma_config.sys_image_guid = 1; dev->rdma_config.vendor_id = 0x1af4; dev->rdma_config.vendor_part_id = 0x0042; dev->max_inline_data = dev->attr.max_send_sge * sizeof(struct vhost_user_rdma_sge); dev->mtu_cap = ib_mtu_enum_to_int(DEFAULT_IB_MTU); dev->port_attr.bad_pkey_cntr = 0; dev->port_attr.qkey_viol_cntr = 0; for (int i = 0; i < VHOST_MAX_GID_TBL_LEN; i++) { dev->gid_tbl[i].type = VHOST_RDMA_GID_TYPE_ILLIGAL; } dev->cq_vqs = &dev->rdma_vqs[1]; dev->qp_vqs = &dev->rdma_vqs[1 + dev->attr.max_cqs]; vhost_rdma_pool_init(&dev->pd_pool, "pd_pool", dev->attr.max_pd, sizeof(struct vhost_rdma_pd), false, NULL); vhost_rdma_pool_init(&dev->mr_pool, "mr_pool", dev->attr.max_mr, sizeof(struct vhost_rdma_mr), false, vhost_rdma_mr_cleanup); vhost_rdma_pool_init(&dev->cq_pool, "cq_pool", dev->attr.max_cqs, sizeof(struct vhost_rdma_cq), true, NULL); vhost_rdma_pool_init(&dev->qp_pool, "qp_pool", dev->attr.max_qps, sizeof(struct vhost_rdma_qp), false, vhost_rdma_qp_cleanup); vhost_rdma_pool_init(&dev->ah_pool, "ah_pool", dev->attr.max_ah, sizeof(struct vhost_rdma_av), false, NULL); dev->qp_gsi = vhost_rdma_pool_alloc(&dev->qp_pool, &qpn); vhost_rdma_add_ref(dev->qp_gsi); assert(qpn == 1); } void vhost_rdma_destroy_ib(struct vhost_rdma_device *dev) { struct vhost_rdma_mr *mr; struct vhost_rdma_pd *pd; struct vhost_rdma_cq *cq; struct vhost_rdma_qp *qp; struct vhost_rdma_av *av; uint32_t i = 0; for (i = 0; i < dev->attr.max_mr; i++) { mr = vhost_rdma_pool_get(&dev->mr_pool, i); if (mr) vhost_rdma_pool_free(&dev->mr_pool, i); } for (i = 0; i < dev->attr.max_pd; i++) { pd = vhost_rdma_pool_get(&dev->pd_pool, i); if (pd) vhost_rdma_pool_free(&dev->pd_pool, i); } for (i = 0; i < dev->attr.max_cqs; i++) { cq = vhost_rdma_pool_get(&dev->cq_pool, i); if (cq) vhost_rdma_pool_free(&dev->cq_pool, i); } for (i = 0; i < dev->attr.max_qps; i++) { qp = 
vhost_rdma_pool_get(&dev->qp_pool, i); if (qp) { vhost_rdma_queue_cleanup(qp, &qp->sq.queue); vhost_rdma_queue_cleanup(qp, &qp->rq.queue); vhost_rdma_pool_free(&dev->qp_pool, i); } } for (i = 0; i < dev->attr.max_ah; i++) { av = vhost_rdma_pool_get(&dev->ah_pool, i); if (av) vhost_rdma_pool_free(&dev->ah_pool, i); } vhost_rdma_pool_destroy(&dev->mr_pool); vhost_rdma_pool_destroy(&dev->pd_pool); vhost_rdma_pool_destroy(&dev->cq_pool); vhost_rdma_pool_destroy(&dev->qp_pool); vhost_rdma_pool_destroy(&dev->ah_pool); } void vhost_rdma_handle_ctrl_vq(void* arg) { } int vhost_rdma_task_scheduler(void *arg) { return 0; } 这段 也改一下
最新发布
10-10
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值