packet event-handler table

This post lists the contents of a packet event-handler table: each index from 0H to FFH is a packet/event ID, and the value stored at that index is the address of the routine that handles it. Looking at which IDs share the same handler address, and at where the pointer values give way to small integers, shows how the table is laid out.

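As background for reading the dump: a table like this is usually just a flat array of function pointers indexed by the packet ID byte, so dispatch is a single indirect call. The sketch below is a minimal illustration in C, not the original code; every name in it (packet_handler_t, handle_default, handle_26h) is hypothetical, and the real handlers are whatever routines live at the dumped addresses.

    #include <stddef.h>
    #include <stdint.h>

    /* A handler receives the raw packet bytes and their length. */
    typedef void (*packet_handler_t)(const uint8_t *pkt, size_t len);

    /* Hypothetical handlers; in the dump below they would correspond to
     * routines at addresses such as 6FB6D7A0H (apparently the default). */
    static void handle_default(const uint8_t *pkt, size_t len) { (void)pkt; (void)len; }
    static void handle_26h(const uint8_t *pkt, size_t len)     { (void)pkt; (void)len; }

    /* 256-entry event-handler table, indexed by the packet ID byte.
     * The [a ... b] range initializer is a GCC/Clang extension. */
    static packet_handler_t handlers[0x100] = {
        [0x00 ... 0xFF] = handle_default,
        [0x26]          = handle_26h,   /* example of a specific override */
    };

    static void dispatch(const uint8_t *pkt, size_t len)
    {
        if (len == 0)
            return;
        handlers[pkt[0]](pkt, len);     /* single indirect call, no switch */
    }

    int main(void)
    {
        const uint8_t pkt[] = { 0x26, 0x01 };
        dispatch(pkt, sizeof pkt);
        return 0;
    }

With that shape in mind, the many entries below that all point at 6FB6D7A0H read naturally as packet IDs routed to a shared default handler.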

 0H  6FB6D7A0H     1H  6FB70BB0H     2H  6FB6D770H     3H  6FB6ED20H
 4H  6FB6F520H     5H  6FB6DE40H     6H  6FB6E520H     7H  6FB6D6C0H
 8H  6FB6D650H     9H  6FB6EF50H     AH  6FB6DDE0H     BH  6FB70CB0H
 CH  6FB6D7A0H     DH  6FB6D7A0H     EH  6FB6D7A0H     FH  6FB6D7A0H
10H  6FB6D7A0H    11H  6FB6EE40H    12H  6FB6D5F0H    13H  6FB6D5E0H
14H  6FB6D5D0H    15H  6FB6F410H    16H  6FB6EC50H    17H  6FB6D7A0H
18H  6FB702A0H    19H  6FB703F0H    1AH  6FB703F0H    1BH  6FB703F0H
1CH  6FB703F0H    1DH  6FB703F0H    1EH  6FB703F0H    1FH  6FB703F0H
20H  6FB70380H    21H  6FB6DCB0H    22H  6FB6DBA0H    23H  6FB6DB20H
24H  6FB6D7A0H    25H  6FB6D7A0H    26H  6FB70B30H    27H  6FB6E980H
28H  6FB70C90H    29H  6FB6D5B0H    2AH  6FB70C50H    2BH  6FB6D7A0H
2CH  6FB6E960H    2DH  6FB6D7A0H    2EH  6FB6D7A0H    2FH  6FB6D7A0H
30H  6FB6D7A0H    31H  6FB6D7A0H    32H  6FB6D7A0H    33H  6FB6D7A0H
34H  6FB6D7A0H    35H  6FB6D7A0H    36H  6FB6D7A0H    37H  6FB6D7A0H
38H  6FB6D7A0H    39H  6FB6D7A0H    3AH  6FB6D7A0H    3BH  6FB6D7A0H
3CH  6FB6D7A0H    3DH  6FB6D7A0H    3EH  6FB6F830H    3FH  6FB70A80H
40H  6FB6DAF0H    41H  6FB6D7A0H    42H  6FB6F310H    43H  6FB6D7A0H
44H  6FB6D7A0H    45H  6FB6D3B0H    46H  6FB6D7A0H    47H  6FB6F7E0H
48H  6FB6F790H    49H  6FB6D7A0H    4AH  6FB6D7A0H    4BH  6FB6D7A0H
4CH  6FB6D7A0H    4DH  6FB6D7A0H    4EH  6FB6D330H    4FH  6FB6D360H
50H  6FB70AF0H    51H  6FB6EF20H    52H  6FB6E7A0H    53H  6FB70000H
54H  6FB6D3A0H    55H  6FB6D7A0H    56H  6FB6D7A0H    57H  6FB6E840H
58H  6FB70C20H    59H  6FB6EF00H    5AH  6FB6E9B0H    5BH  6FB6EB70H
5CH  6FB6EB60H    5DH  6FB70A60H    5EH  6FB6D310H    5FH  6FB6D2D0H
60H  6FB6E3F0H    61H  6FB70BE0H    62H  6FB6F400H    63H  6FB70AB0H
64H  6FB6D7A0H    65H  6FB6EB10H    66H  6FB6D2C0H    67H  6FB6D7A0H
68H  6FB6D7A0H    69H  6FB6D7A0H    6AH  6FB6D7A0H    6BH  6FB6D7A0H
6CH  6FB6D7A0H    6DH  6FB6D7A0H    6EH  6FB6D7A0H    6FH  6FB6D7A0H
70H  6FB6D7A0H    71H  6FB6D7A0H    72H  6FB6D7A0H    73H  6FB70D10H
74H  6FB6EED0H    75H  6FB6EAF0H    76H  6FB6E440H    77H  6FB70AA0H
78H  6FB6DA50H    79H  6FB6DA30H    7AH  6FB6DA00H    7BH  6FB6D9B0H
7CH  6FB6D960H    7DH  6FB6F720H    7EH  6FB6E390H    7FH  6FB6EAA0H
80H  0H           81H  6FB6FFC0H    82H  6FB6E2B0H    83H  6FB6D7A0H
84H  6FB6D7A0H    85H  6FB6D7A0H    86H  6FB6D7A0H    87H  6FB6D7A0H
88H  6FB6D7A0H    89H  6FB6D950H    8AH  6FB6ED40H    8BH  6FB6EA80H
8CH  6FB6EA30H    8DH  6FB6EA00H    8EH  6FB6E280H    8FH  6FB6D260H
90H  6FB6E370H    91H  6FB6DAC0H    92H  6FB6F760H    93H  6FB6E4A0H
94H  6FB6DC20H    95H  6FB701E0H    96H  6FB6EBD0H    97H  6FB6D270H
98H  6FB6E770H    99H  6FB6F390H    9AH  6FB6F330H    9BH  6FB6FFB0H
9CH  6FB6F5C0H    9DH  6FB6FE60H    9EH  6FB70080H    9FH  6FB70080H
A0H  6FB70080H    A1H  6FB70080H    A2H  6FB70080H    A3H  6FB6ED60H
A4H  6FB6E9E0H    A5H  6FB6F920H    A6H  6FB6D240H    A7H  6FB6F2D0H
A8H  6FB6F190H    A9H  6FB6F150H    AAH  6FB6EFB0H    ABH  6FB6E200H
ACH  6FB6FA10H    ADH  6FB6D7A0H    AEH  6FB6E910H    AFH  277F2783H
B0H  277E2782H    B1H  27882789H    B2H  6H           B3H  FFFFFFFFH
B4H  F5E0F5AH     B5H  5H           B6H  1H           B7H  19H
B8H  3H           B9H  5H           BAH  5H           BBH  4H
BCH  AH           BDH  CH           BEH  2H           BFH  3H
C0H  AH           C1H  0H           C2H  0H           C3H  0H
C4H  1H           C5H  2H           C6H  3H           C7H  3H
C8H  2H           C9H  2H           CAH  2H           CBH  2H
CCH  0H           CDH  0H           CEH  0H           CFH  2H
D0H  2H           D1H  1H           D2H  0H           D3H  2H
D4H  2H           D5H  2H           D6H  4H           D7H  4H
D8H  3H           D9H  4H           DAH  4H           DBH  4H
DCH  0H           DDH  0H           DEH  2H           DFH  2H
E0H  2H           E1H  2H           E2H  FFFFFFFFH    E3H  6FB602F0H
E4H  0H           E5H  0H           E6H  0H           E7H  6FB5F940H
E8H  6FB60160H    E9H  6FB5F930H    EAH  6FB8382CH    EBH  6FB83814H
ECH  6FB837FCH    EDH  6FB837E4H    EEH  6FB837CCH    EFH  6FB837B4H
F0H  6FB8379CH    F1H  6FB83784H    F2H  6FB8376CH    F3H  E980040H
F4H  E9C0040H     F5H  E8D0E8DH     F6H  E900040H     F7H  40H
F8H  E9D0002H     F9H  E8D0051H     FAH  E8D0E8DH     FBH  E9F0E8DH
FCH  E8E0E8DH     FDH  A30E84H      FEH  E950092H     FFH  E8D0E8DH
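Two patterns stand out. First, a large share of the IDs point at 6FB6D7A0H, which looks like a catch-all or no-op default handler. Second, from AFH onward the entries mix packed half-word pairs (277F2783H), small counters, and a few scattered pointers (E3H, E7H through F2H), so the tail of this region is presumably bookkeeping state rather than handler pointers. A dump in the format above can be produced by walking the table as 256 little-endian DWORDs from its base address. The sketch below is a hedged illustration only: it assumes the table is already mapped into the current process (table here is a stand-in for a hypothetical table_base); reading another process's memory would additionally need ReadProcessMemory or an equivalent.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Print a 256-entry table of 32-bit values in "index  value" form,
     * matching the dump above (hex, no leading zeros, 'H' suffix). */
    static void dump_handler_table(const uint32_t *table_base)
    {
        for (unsigned i = 0; i < 0x100; i++)
            printf("%XH  %" PRIX32 "H\n", i, table_base[i]);
    }

    int main(void)
    {
        /* Hypothetical stand-in data instead of a live module's table. */
        static uint32_t table[0x100] = { 0x6FB6D7A0, 0x6FB70BB0 };
        dump_handler_table(table);
        return 0;
    }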

 

Reposted from: https://www.cnblogs.com/Timanders/archive/2007/11/22/968397.html
