// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Authors: Xiong Weimin <xiongweimin@kylinos.cn> */
/* Copyright 2020 kylinos.cn. All Rights Reserved. */
#include <linux/virtio_config.h>
#include "vrdma.h"
#include "vrdma_dev_api.h"
#include "vrdma_queue.h"
/**
* init_device_attr - Initialize IB device attributes from virtio config space
* @rdev: Virtio RDMA device
*
* Reads the device configuration fields and populates the InfiniBand device
* attributes (&rdev->attr). This function must be called during device probe,
* before the virtqueues are created and before the IB device is registered.
*/
static void init_device_attr(struct vrdma_dev *rdev)
{
struct ib_device_attr *attr = &rdev->attr;
struct vrdma_config cfg;
/* Zero out attribute structure */
memset(attr, 0, sizeof(*attr));
/* Fetch each attribute field from the device config space */
virtio_cread(rdev->vdev, struct vrdma_config, phys_port_cnt, &cfg.phys_port_cnt);
virtio_cread(rdev->vdev, struct vrdma_config, sys_image_guid, &cfg.sys_image_guid);
virtio_cread(rdev->vdev, struct vrdma_config, vendor_id, &cfg.vendor_id);
virtio_cread(rdev->vdev, struct vrdma_config, vendor_part_id, &cfg.vendor_part_id);
virtio_cread(rdev->vdev, struct vrdma_config, hw_ver, &cfg.hw_ver);
virtio_cread(rdev->vdev, struct vrdma_config, max_mr_size, &cfg.max_mr_size);
virtio_cread(rdev->vdev, struct vrdma_config, page_size_cap, &cfg.page_size_cap);
virtio_cread(rdev->vdev, struct vrdma_config, max_qp, &cfg.max_qp);
virtio_cread(rdev->vdev, struct vrdma_config, max_qp_wr, &cfg.max_qp_wr);
virtio_cread(rdev->vdev, struct vrdma_config, device_cap_flags, &cfg.device_cap_flags);
virtio_cread(rdev->vdev, struct vrdma_config, max_send_sge, &cfg.max_send_sge);
virtio_cread(rdev->vdev, struct vrdma_config, max_recv_sge, &cfg.max_recv_sge);
virtio_cread(rdev->vdev, struct vrdma_config, max_sge_rd, &cfg.max_sge_rd);
virtio_cread(rdev->vdev, struct vrdma_config, max_cq, &cfg.max_cq);
virtio_cread(rdev->vdev, struct vrdma_config, max_cqe, &cfg.max_cqe);
virtio_cread(rdev->vdev, struct vrdma_config, max_mr, &cfg.max_mr);
virtio_cread(rdev->vdev, struct vrdma_config, max_pd, &cfg.max_pd);
virtio_cread(rdev->vdev, struct vrdma_config, max_qp_rd_atom, &cfg.max_qp_rd_atom);
virtio_cread(rdev->vdev, struct vrdma_config, max_res_rd_atom, &cfg.max_res_rd_atom);
virtio_cread(rdev->vdev, struct vrdma_config, max_qp_init_rd_atom, &cfg.max_qp_init_rd_atom);
virtio_cread(rdev->vdev, struct vrdma_config, atomic_cap, &cfg.atomic_cap);
virtio_cread(rdev->vdev, struct vrdma_config, max_mw, &cfg.max_mw);
virtio_cread(rdev->vdev, struct vrdma_config, max_mcast_grp, &cfg.max_mcast_grp);
virtio_cread(rdev->vdev, struct vrdma_config, max_mcast_qp_attach, &cfg.max_mcast_qp_attach);
virtio_cread(rdev->vdev, struct vrdma_config, max_total_mcast_qp_attach, &cfg.max_total_mcast_qp_attach);
virtio_cread(rdev->vdev, struct vrdma_config, max_ah, &cfg.max_ah);
virtio_cread(rdev->vdev, struct vrdma_config, max_fast_reg_page_list_len, &cfg.max_fast_reg_page_list_len);
virtio_cread(rdev->vdev, struct vrdma_config, max_pi_fast_reg_page_list_len, &cfg.max_pi_fast_reg_page_list_len);
virtio_cread(rdev->vdev, struct vrdma_config, max_pkeys, &cfg.max_pkeys);
virtio_cread(rdev->vdev, struct vrdma_config, local_ca_ack_delay, &cfg.local_ca_ack_delay);
/*
* Copy values into ib_device_attr. Note that virtio_cread() already
* performs the virtio-to-CPU endianness conversion for each field, so
* applying le*_to_cpu() here again would corrupt the values on
* big-endian hosts.
*/
rdev->ib_dev.phys_port_cnt = cfg.phys_port_cnt;
attr->sys_image_guid = cfg.sys_image_guid;
attr->vendor_id = cfg.vendor_id;
attr->vendor_part_id = cfg.vendor_part_id;
attr->hw_ver = cfg.hw_ver;
attr->max_mr_size = cfg.max_mr_size;
attr->page_size_cap = cfg.page_size_cap;
attr->max_qp = cfg.max_qp;
attr->max_qp_wr = cfg.max_qp_wr;
attr->device_cap_flags = cfg.device_cap_flags;
attr->max_send_sge = cfg.max_send_sge;
attr->max_recv_sge = cfg.max_recv_sge;
attr->max_srq_sge = attr->max_send_sge; /* SRQ not supported; mirror send SGE limit */
attr->max_sge_rd = cfg.max_sge_rd;
attr->max_cq = cfg.max_cq;
attr->max_cqe = cfg.max_cqe;
attr->max_mr = cfg.max_mr;
attr->max_pd = cfg.max_pd;
attr->max_qp_rd_atom = cfg.max_qp_rd_atom;
attr->max_res_rd_atom = cfg.max_res_rd_atom;
attr->max_qp_init_rd_atom = cfg.max_qp_init_rd_atom;
attr->atomic_cap = vrdma_atomic_cap_to_ib(cfg.atomic_cap);
attr->max_mw = cfg.max_mw;
attr->max_mcast_grp = cfg.max_mcast_grp;
attr->max_mcast_qp_attach = cfg.max_mcast_qp_attach;
attr->max_total_mcast_qp_attach = cfg.max_total_mcast_qp_attach;
attr->max_ah = cfg.max_ah;
attr->max_fast_reg_page_list_len = cfg.max_fast_reg_page_list_len;
attr->max_pi_fast_reg_page_list_len = cfg.max_pi_fast_reg_page_list_len;
attr->max_pkeys = cfg.max_pkeys;
attr->local_ca_ack_delay = cfg.local_ca_ack_delay;
}
/**
* vrdma_init_device - Initialize attributes and virtqueues for a vRDMA device
* @dev: The vRDMA device to initialize
*
* Reads the device attributes from the virtio config space, then allocates
* and sets up one control virtqueue, one virtqueue per CQ, and a send/recv
* virtqueue pair per QP.
*
* Return: 0 on success, or a negative errno on failure.
*/
int vrdma_init_device(struct vrdma_dev *dev)
{
int rc;
struct virtqueue **vqs;
struct virtqueue_info *vqs_info;
unsigned int i, cur_vq;
unsigned int total_vqs;
u32 max_cq, max_qp;
/* Initialize device attributes */
init_device_attr(dev);
max_cq = dev->attr.max_cq;
max_qp = dev->attr.max_qp; /* no SRQ support, so SRQ limits are not read */
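/*
* Minimal sanity check (assumed, not mandated by the device contract):
* zero CQs or QPs would render the device unusable, so fail the probe
* early rather than allocate empty queue arrays.
*/
if (!max_cq || !max_qp) {
dev_err(&dev->vdev->dev,
"invalid device limits: max_cq=%u max_qp=%u\n",
max_cq, max_qp);
return -EINVAL;
}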
/*
* Total virtqueues:
* 1 control queue (for verbs commands)
* max_cq completion queues (CQ)
* max_qp * 2 data queues (send & recv queue pairs per QP)
*/
total_vqs = 1 + max_cq + 2 * max_qp;
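/* e.g. max_cq = 64 and max_qp = 128 give 1 + 64 + 256 = 321 virtqueues */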
/* Allocate storage in dev */
/* Probe context may sleep, so GFP_KERNEL is the right allocation mode */
dev->cq_vqs = kcalloc(max_cq, sizeof(*dev->cq_vqs), GFP_KERNEL);
if (!dev->cq_vqs)
return -ENOMEM;
dev->cqs = kcalloc(max_cq, sizeof(*dev->cqs), GFP_KERNEL);
if (!dev->cqs) {
rc = -ENOMEM;
goto err_free_cq_vqs;
}
dev->qp_vqs = kcalloc(2 * max_qp, sizeof(*dev->qp_vqs), GFP_KERNEL);
if (!dev->qp_vqs) {
rc = -ENOMEM;
goto err_free_cqs;
}
/* Temporary arrays for virtio_find_vqs(); freed again before returning */
vqs_info = kcalloc(total_vqs, sizeof(*vqs_info), GFP_KERNEL);
vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs_info || !vqs) {
rc = -ENOMEM;
goto err_free_vqs;
}
/* Setup queue names and callbacks */
cur_vq = 0;
/* Control virtqueue (no callback) */
vqs_info[cur_vq].name = "vrdma-ctrl";
vqs_info[cur_vq].callback = NULL;
cur_vq++;
/* Completion Queue virtqueues */
for (i = 0; i < max_cq; i++) {
snprintf(dev->cq_vqs[i].name, sizeof(dev->cq_vqs[i].name),
"cq.%u", i);
vqs_info[cur_vq].name = dev->cq_vqs[i].name;
vqs_info[cur_vq].callback = vrdma_cq_ack;
cur_vq++;
}
/* Send/Receive Queue Pairs for each QP */
for (i = 0; i < max_qp; i++) {
snprintf(dev->qp_vqs[2 * i].name, sizeof(dev->qp_vqs[2 * i].name),
"sqp.%u", i);
snprintf(dev->qp_vqs[2 * i + 1].name, sizeof(dev->qp_vqs[2 * i + 1].name),
"rqp.%u", i);
vqs_info[cur_vq].name = dev->qp_vqs[2 * i].name;
vqs_info[cur_vq + 1].name = dev->qp_vqs[2 * i + 1].name;
vqs_info[cur_vq].callback = NULL; /* No TX callback */
vqs_info[cur_vq + 1].callback = NULL; /* No RX callback */
cur_vq += 2;
}
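/*
* Resulting index layout: 0 is the control queue, [1, max_cq] are the
* CQ queues, and QP i owns the consecutive pair starting at
* 1 + max_cq + 2 * i (send queue first, then recv queue).
*/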
/* Now ask VirtIO layer to set up the virtqueues */
rc = virtio_find_vqs(dev->vdev, total_vqs, vqs, vqs_info, NULL);
if (rc) {
pr_err("Failed to find %u virtqueues: %d\n", total_vqs, rc);
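/*
* virtio_find_vqs() is expected to tear down any partially
* created queues on failure, so only the driver-owned arrays
* need to be freed in the error path.
*/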
goto err_free_vqs;
}
/* Assign found virtqueues to device structures */
cur_vq = 0;
dev->ctrl_vq = vqs[cur_vq++];
for (i = 0; i < max_cq; i++) {
dev->cq_vqs[i].vq = vqs[cur_vq++];
dev->cq_vqs[i].idx = i;
spin_lock_init(&dev->cq_vqs[i].lock);
}
for (i = 0; i < max_qp; i++) {
struct vrdma_vq *sq = &dev->qp_vqs[2 * i];
struct vrdma_vq *rq = &dev->qp_vqs[2 * i + 1];
sq->vq = vqs[cur_vq++];
rq->vq = vqs[cur_vq++];
sq->idx = i;
rq->idx = i;
spin_lock_init(&sq->lock);
spin_lock_init(&rq->lock);
}
/* Final setup */
mutex_init(&dev->port_mutex);
dev->ib_active = true;
/* Free both temporary arrays; the virtqueue pointers now live in dev */
kfree(vqs_info);
kfree(vqs);
return 0;
err_free_vqs:
kfree(vqs_info);
kfree(vqs);
kfree(dev->qp_vqs);
dev->qp_vqs = NULL;
err_free_cqs:
kfree(dev->cqs);
dev->cqs = NULL;
err_free_cq_vqs:
kfree(dev->cq_vqs);
dev->cq_vqs = NULL;
return rc;
}
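/**
* vrdma_finish_device - Tear down a vRDMA device
* @dev: The vRDMA device to tear down
*
* Marks the device inactive, resets the underlying virtio device, deletes
* all virtqueues, and frees the queue bookkeeping arrays allocated by
* vrdma_init_device(). Counterpart to vrdma_init_device().
*/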
void vrdma_finish_device(struct vrdma_dev *dev)
{
if (!dev) {
pr_err("%s: invalid device pointer\n", __func__);
return;
}
if (!dev->vdev || !dev->vdev->config) {
pr_warn("%s: device or config is NULL, skipping teardown\n", __func__);
return;
}
/* Step 1: Mark device as inactive to prevent new operations */
dev->ib_active = false;
/* Step 2: Synchronize and stop any pending work (e.g., CQ processing) */
mutex_lock(&dev->port_mutex);
/*
* If the driver gains deferred work or timers (e.g. CQ processing
* work items or poll timers), flush/cancel them here, for example
* via flush_work(&dev->cq_task) or del_timer_sync(&dev->poll_timer).
*/
mutex_unlock(&dev->port_mutex);
/* Step 3: Bring the device into reset state */
virtio_reset_device(dev->vdev);
/* Step 4: Delete all virtqueues (this also synchronizes with callbacks) */
dev->vdev->config->del_vqs(dev->vdev);
/* Step 5: Free dynamically allocated arrays */
kfree(dev->cq_vqs); /* Free CQ queue metadata */
dev->cq_vqs = NULL;
kfree(dev->cqs); /* Free CQ context array */
dev->cqs = NULL;
kfree(dev->qp_vqs); /* Free QP send/receive queue metadata */
dev->qp_vqs = NULL;
}