/**
@brief Application entry point.
Initializes EAL, parses args, sets up ports, mempools, rings,
registers vhost drivers, launches threads.
*/
int main(int argc, char **argv)
{
unsigned lcore_id, core_id = 0;
int ret;
uint16_t port_id;
bool pair_found = false;
struct rte_eth_dev_info dev_info;
/* Register signal handler for clean shutdown */
signal(SIGINT, vhost_rdma_signal_handler);
signal(SIGTERM, vhost_rdma_signal_handler);
/* Initialize DPDK Environment Abstraction Layer */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_panic(“Unable to initialize DPDK EAL\n”);
argc -= ret;
argv += ret;
rte_log_set_global_level(RTE_LOG_NOTICE);
/* Parse application-specific arguments */
if (vhost_rdma_parse_args(argc, argv) != 0) {
rte_exit(EXIT_FAILURE, “Argument parsing failed\n”);
}
/* Initialize per-lcore data structures */
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
if (rte_lcore_is_enabled(lcore_id)) {
lcore_ids[core_id++] = lcore_id;
}
}
if (rte_lcore_count() < 2) {
rte_exit(EXIT_FAILURE, “At least two cores required (one main + one worker)\n”);
}
/*
Create shared memory pool for mbufs
Used by both RX and TX paths
*/
vhost_rdma_mbuf_pool = rte_pktmbuf_pool_create(
“mbuf_pool_shared”,
total_num_mbufs,
MBUF_CACHE_SIZE,
sizeof(struct vhost_rdma_pkt_info),
MBUF_DATA_SIZE,
rte_socket_id()
);
if (vhost_rdma_mbuf_pool == NULL) {
rte_exit(EXIT_FAILURE, “Cannot create mbuf pool: %s\n”, rte_strerror(rte_errno));
}
/*
Create shared rings for packet exchange
SP_ENQ: Single-producer enqueue (from NIC)
MC_HTS_DEQ: Multi-consumer with HTS dequeue (to workers)
*/
vhost_rdma_rx_ring = rte_ring_create(
“ring_rx_shared”,
MAX_RING_COUNT,
rte_socket_id(),
RING_F_SP_ENQ | RING_F_MC_HTS_DEQ
);
if (vhost_rdma_rx_ring == NULL)
rte_exit(EXIT_FAILURE, “Failed to create RX ring: %s\n”, rte_strerror(rte_errno));
vhost_rdma_tx_ring = rte_ring_create(
“ring_tx_shared”,
MAX_RING_COUNT,
rte_socket_id(),
RING_F_MP_HTS_ENQ | RING_F_SC_DEQ
);
if (vhost_rdma_tx_ring == NULL)
rte_exit(EXIT_FAILURE, “Failed to create TX ring: %s\n”, rte_strerror(rte_errno));
/*
Find and initialize backend Ethernet device (e.g., net_tap or net_vhost)
*/
RTE_ETH_FOREACH_DEV(port_id) {
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0) {
RDMA_LOG_ERR(“Failed to get info for port %u\n”, port_id);
continue;
}
if (!pair_found &&
(strcmp(dev_info.driver_name, “net_tap”) == 0 ||
strcmp(dev_info.driver_name, “net_vhost”) == 0)) {
pair_port_id = port_id; pair_found = true; ret = vhost_rdma_init_port(port_id, !!enable_tx_csum); if (ret != 0) { rte_exit(EXIT_FAILURE, "Failed to initialize port %u: %s\n", port_id, rte_strerror(-ret)); } RDMA_LOG_INFO("Using device %s (port %u) as backend interface\n", dev_info.device->name, port_id);
}
}
if (!pair_found) {
rte_exit(EXIT_FAILURE, “No suitable backend Ethernet device found\n”);
}
/*
Launch worker threads for packet processing
*/
RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(vhost_rdma_txrx_main_thread, NULL, lcore_id);
}
/*
Setup per-vhost-device resources and register vhost drivers
*/
char name_buf[SOCKET_PATH_MAX];
for (int i = 0; i < nb_sockets; i++) {
const char *sock_path = socket_path + i * SOCKET_PATH_MAX;
struct vhost_rdma_device *dev = &g_vhost_rdma_dev[i];
dev->vid = i;
if (i == 0) {
/* Use shared resources for first device /
dev->rx_ring = vhost_rdma_rx_ring;
dev->tx_ring = vhost_rdma_tx_ring;
dev->mbuf_pool = vhost_rdma_mbuf_pool;
} else {
/ Create dedicated resources for additional devices */
snprintf(name_buf, sizeof(name_buf), “dev%u_rx_ring”, i);
dev->rx_ring = rte_ring_create(name_buf, MAX_RING_COUNT,
rte_socket_id(), RING_F_SP_ENQ | RING_F_MC_HTS_DEQ);
if (!dev->rx_ring)
rte_exit(EXIT_FAILURE, “Failed to create RX ring %d\n”, i);
snprintf(name_buf, sizeof(name_buf), "dev%u_tx_ring", i); dev->tx_ring = rte_ring_create(name_buf, MAX_RING_COUNT, rte_socket_id(), RING_F_MP_HTS_ENQ | RING_F_SC_DEQ); if (!dev->tx_ring) rte_exit(EXIT_FAILURE, "Failed to create TX ring %d\n", i); snprintf(name_buf, sizeof(name_buf), "dev%u_mbuf_pool", i); dev->mbuf_pool = rte_pktmbuf_pool_create(name_buf, total_num_mbufs, MBUF_CACHE_SIZE, sizeof(struct vhost_rdma_pkt_info), MBUF_DATA_SIZE, rte_socket_id()); if (!dev->mbuf_pool) rte_exit(EXIT_FAILURE, "Failed to create mbuf pool %d\n", i);
}
snprintf(name_buf, sizeof(name_buf), “dev%u_task_ring”, i);
dev->task_ring = rte_ring_create(name_buf, MAX_RING_COUNT,
rte_socket_id(),
RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
if (!dev->task_ring)
rte_exit(EXIT_FAILURE, “Failed to create task ring %d\n”, i);
/* Construct and register vhost device */
ret = vhost_rdma_construct(dev, sock_path, i);
if (ret < 0) {
RDMA_LOG_ERR(“Failed to construct vhost device %d\n”, i);
continue;
}
ret = rte_vhost_driver_start(sock_path);
if (ret < 0) {
RDMA_LOG_ERR(“Failed to start vhost driver for %s\n”, sock_path);
} else {
RDMA_LOG_INFO(“Successfully started vhost driver: %s\n”, sock_path);
}
}
/* Wait for all worker threads to complete (they won’t unless forced) */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_wait_lcore(lcore_id);
}
/* Cleanup */
rte_eal_cleanup();
free(socket_path);
RDMA_LOG_INFO(“Application terminated gracefully.\n”);
return 0;
} 按照dpdk社区代码提交规则生成英文commit信息,作者是 Xiong Weimin xiongweimin@kylinos.cn