UDP transmit path: tracing the copy_from_user() call chain

This article traces the copy_from_user() call path by which, during a UDP send, data is copied from user space into kernel space.

 
System call: sendto() -> __sys_sendto() -> __sock_sendmsg() -> sock_sendmsg_nosec() -> inet_sendmsg() -> udp_sendmsg() -> ip_make_skb() -> __ip_append_data() -> ip_generic_getfrag() -> csum_and_copy_from_iter_full() -> copy_from_user_iter_csum() -> csum_and_copy_from_user() -> copy_from_user()
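
To exercise this exact path, a plain blocking sendto() on an unconnected UDP SOCK_DGRAM socket is enough. A minimal user-space sketch (the destination address and port are arbitrary placeholders):

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0) {
                perror("socket");
                return 1;
        }

        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port   = htons(9999),      /* placeholder port */
        };
        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

        const char buf[] = "hello";
        /* Enters the kernel at SYSCALL_DEFINE6(sendto, ...) below; buf is
         * the user pointer that import_ubuf() wraps into msg.msg_iter. */
        if (sendto(fd, buf, sizeof(buf), 0,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0)
                perror("sendto");

        close(fd);
        return 0;
}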

net/socket.c


SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
                unsigned int, flags, struct sockaddr __user *, addr,
                int, addr_len)
{
        return __sys_sendto(fd, buff, len, flags, addr, addr_len);
}

-> __sys_sendto()

net/socket.c


/*
 *      Send a datagram to a given address. We move the address into kernel
 *      space and check the user space data area is readable before invoking
 *      the protocol.
 */
int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
                 struct sockaddr __user *addr,  int addr_len)
{
        struct socket *sock;
        struct sockaddr_storage address;
        int err;
        struct msghdr msg;
        int fput_needed;

        err = import_ubuf(ITER_SOURCE, buff, len, &msg.msg_iter);
        if (unlikely(err))
                return err;
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;

        msg.msg_name = NULL;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;
        msg.msg_ubuf = NULL;
        if (addr) {
                err = move_addr_to_kernel(addr, addr_len, &address);
                if (err < 0)
                        goto out_put;
                msg.msg_name = (struct sockaddr *)&address;
                msg.msg_namelen = addr_len;
        }
        flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;
        msg.msg_flags = flags;
        err = __sock_sendmsg(sock, &msg);

out_put:
        fput_light(sock->file, fput_needed);
out:
        return err;
}

Note:

import_ubuf() -> iov_iter_ubuf() stores the user buffer address (buff) in msg.msg_iter.ubuf and sets msg.msg_iter.iter_type = ITER_UBUF; struct msghdr msg then carries the iterator down to __sock_sendmsg().
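
For reference, import_ubuf() itself is short. A paraphrase of the lib/iov_iter.c implementation (simplified; exact details vary across kernel versions):

int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)                  /* clamp to the syscall I/O limit */
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(buf, len)))      /* user range must be plausible */
                return -EFAULT;

        iov_iter_ubuf(i, rw, buf, len);          /* iter_type = ITER_UBUF, ubuf = buf, count = len */
        return 0;
}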

-> __sock_sendmsg() -> sock_sendmsg_nosec()

net/socket.c

static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
{
        int ret = INDIRECT_CALL_INET(READ_ONCE(sock->ops)->sendmsg, inet6_sendmsg,
                                     inet_sendmsg, sock, msg,
                                     msg_data_left(msg));
        BUG_ON(ret == -EIOCBQUEUED);

        if (trace_sock_send_length_enabled())
                call_trace_sock_send_length(sock->sk, ret, 0);
        return ret;
}
static int __sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
        int err = security_socket_sendmsg(sock, msg,
                                          msg_data_left(msg));

        return err ?: sock_sendmsg_nosec(sock, msg);
}
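
INDIRECT_CALL_INET() is a devirtualization trick: the function pointer is compared against the expected IPv6 and IPv4 handlers and, on a match, called directly, avoiding a retpoline-penalized indirect branch. A user-space illustration of the same pattern (the macros below are a simplification of include/linux/indirect_call_wrapper.h, not a copy):

#include <stdio.h>

/* If the pointer matches a likely target, make a direct call the
 * compiler can inline; otherwise fall back to the indirect call. */
#define INDIRECT_CALL_1(f, f1, ...) \
        ((f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))
#define INDIRECT_CALL_2(f, f2, f1, ...) \
        ((f) == (f2) ? f2(__VA_ARGS__) : INDIRECT_CALL_1(f, f1, __VA_ARGS__))

static int inet_sendmsg_stub(int len)  { printf("ipv4 path\n"); return len; }
static int inet6_sendmsg_stub(int len) { printf("ipv6 path\n"); return len; }

int main(void)
{
        int (*sendmsg_op)(int) = inet_sendmsg_stub;

        /* Resolves to a direct call to inet_sendmsg_stub() here. */
        return INDIRECT_CALL_2(sendmsg_op, inet6_sendmsg_stub,
                               inet_sendmsg_stub, 5) == 5 ? 0 : 1;
}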

-> inet_sendmsg()

IPv4:

net/ipv4/af_inet.c

int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
        struct sock *sk = sock->sk;

        if (unlikely(inet_send_prepare(sk)))
                return -EAGAIN;

        return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udp_sendmsg,
                               sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);

-> udp_sendmsg()

net/ipv4/udp.c

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct udp_sock *up = udp_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
        struct flowi4 fl4_stack;
        struct flowi4 *fl4;
        int ulen = len;
        struct ipcm_cookie ipc;
        struct rtable *rt = NULL;
        int free = 0;
        int connected = 0;
        __be32 daddr, faddr, saddr;
        u8 tos, scope;
        __be16 dport;
        int err, is_udplite = IS_UDPLITE(sk);
        int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
        struct sk_buff *skb;
        struct ip_options_data opt_copy;
        int uc_index;

        if (len > 0xFFFF)
                return -EMSGSIZE;

        /*
         *      Check the flags.
         */

        if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
                return -EOPNOTSUPP;

        getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

        fl4 = &inet->cork.fl.u.ip4;
        if (READ_ONCE(up->pending)) {
                /*
                 * There are pending frames.
                 * The socket lock must be held while it's corked.
                 */
                lock_sock(sk);
                if (likely(up->pending)) {
                        if (unlikely(up->pending != AF_INET)) {
                                release_sock(sk);
                                return -EINVAL;
                        }
                        goto do_append_data;
                }
                release_sock(sk);
        }
        ulen += sizeof(struct udphdr);

        /*
         *      Get and verify the address.
         */
        if (usin) {
                if (msg->msg_namelen < sizeof(*usin))
                        return -EINVAL;
                if (usin->sin_family != AF_INET) {
                        if (usin->sin_family != AF_UNSPEC)
                                return -EAFNOSUPPORT;
                }

                daddr = usin->sin_addr.s_addr;
                dport = usin->sin_port;
                if (dport == 0)
                        return -EINVAL;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
                daddr = inet->inet_daddr;
                dport = inet->inet_dport;
                /* Open fast path for connected socket.
                   Route will not be used, if at least one option is set.
                 */
                connected = 1;
        }

        ipcm_init_sk(&ipc, inet);
        ipc.gso_size = READ_ONCE(up->gso_size);

        if (msg->msg_controllen) {
                err = udp_cmsg_send(sk, msg, &ipc.gso_size);
                if (err > 0) {
                        err = ip_cmsg_send(sk, msg, &ipc,
                                           sk->sk_family == AF_INET6);
                        connected = 0;
                }
                if (unlikely(err < 0)) {
                        kfree(ipc.opt);
                        return err;
                }
                if (ipc.opt)
                        free = 1;
        }
        if (!ipc.opt) {
                struct ip_options_rcu *inet_opt;

                rcu_read_lock();
                inet_opt = rcu_dereference(inet->inet_opt);
                if (inet_opt) {
                        memcpy(&opt_copy, inet_opt,
                               sizeof(*inet_opt) + inet_opt->opt.optlen);
                        ipc.opt = &opt_copy.opt;
                }
                rcu_read_unlock();
        }

        if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) {
                err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
                                            (struct sockaddr *)usin,
                                            &msg->msg_namelen,
                                            &ipc.addr);
                if (err)
                        goto out_free;
                if (usin) {
                        if (usin->sin_port == 0) {
                                /* BPF program set invalid port. Reject it. */
                                err = -EINVAL;
                                goto out_free;
                        }
                        daddr = usin->sin_addr.s_addr;
                        dport = usin->sin_port;
                }
        }

        saddr = ipc.addr;
        ipc.addr = faddr = daddr;

        if (ipc.opt && ipc.opt->opt.srr) {
                if (!daddr) {
                        err = -EINVAL;
                        goto out_free;
                }
                faddr = ipc.opt->opt.faddr;
                connected = 0;
        }
        tos = get_rttos(&ipc, inet);
        scope = ip_sendmsg_scope(inet, &ipc, msg);
        if (scope == RT_SCOPE_LINK)
                connected = 0;

        uc_index = READ_ONCE(inet->uc_index);
        if (ipv4_is_multicast(daddr)) {
                if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
                        ipc.oif = READ_ONCE(inet->mc_index);
                if (!saddr)
                        saddr = READ_ONCE(inet->mc_addr);
                connected = 0;
        } else if (!ipc.oif) {
                ipc.oif = uc_index;
        } else if (ipv4_is_lbcast(daddr) && uc_index) {
                /* oif is set, packet is to local broadcast and
                 * uc_index is set. oif is most likely set
                 * by sk_bound_dev_if. If uc_index != oif check if the
                 * oif is an L3 master and uc_index is an L3 slave.
                 * If so, we want to allow the send using the uc_index.
                 */
                if (ipc.oif != uc_index &&
                    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
                                                              uc_index)) {
                        ipc.oif = uc_index;
                }
        }

        if (connected)
                rt = dst_rtable(sk_dst_check(sk, 0));

        if (!rt) {
                struct net *net = sock_net(sk);
                __u8 flow_flags = inet_sk_flowi_flags(sk);

                fl4 = &fl4_stack;

                flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, scope,
                                   sk->sk_protocol, flow_flags, faddr, saddr,
                                   dport, inet->inet_sport, sk->sk_uid);

                security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
                rt = ip_route_output_flow(net, fl4, sk);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
                        rt = NULL;
                        if (err == -ENETUNREACH)
                                IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
                        goto out;
                }

                err = -EACCES;
                if ((rt->rt_flags & RTCF_BROADCAST) &&
                    !sock_flag(sk, SOCK_BROADCAST))
                        goto out;
                if (connected)
                        sk_dst_set(sk, dst_clone(&rt->dst));
        }

        if (msg->msg_flags&MSG_CONFIRM)
                goto do_confirm;
back_from_confirm:

        saddr = fl4->saddr;
        if (!ipc.addr)
                daddr = ipc.addr = fl4->daddr;

        /* Lockless fast path for the non-corking case. */
        if (!corkreq) {
                struct inet_cork cork;

                skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
                                  sizeof(struct udphdr), &ipc, &rt,
                                  &cork, msg->msg_flags);
                err = PTR_ERR(skb);
                if (!IS_ERR_OR_NULL(skb))
                        err = udp_send_skb(skb, fl4, &cork);
                goto out;
        }

        lock_sock(sk);
        if (unlikely(up->pending)) {
                /* The socket is already corked while preparing it. */
                /* ... which is an evident application bug. --ANK */
                release_sock(sk);

                net_dbg_ratelimited("socket already corked\n");
                err = -EINVAL;
                goto out;
        }
        /*
         *      Now cork the socket to pend data.
         */
        fl4 = &inet->cork.fl.u.ip4;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->fl4_dport = dport;
        fl4->fl4_sport = inet->inet_sport;
        WRITE_ONCE(up->pending, AF_INET);

do_append_data:
        up->len += ulen;
        err = ip_append_data(sk, fl4, getfrag, msg, ulen,
                             sizeof(struct udphdr), &ipc, &rt,
                             corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
        if (err)
                udp_flush_pending_frames(sk);
        else if (!corkreq)
                err = udp_push_pending_frames(sk);
        else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
                WRITE_ONCE(up->pending, 0);
        release_sock(sk);

out:
        ip_rt_put(rt);
out_free:
        if (free)
                kfree(ipc.opt);
        if (!err)
                return len;
        /*
         * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
         * ENOBUFS might not be good (it's not tunable per se), but otherwise
         * we don't have a good statistic (IpOutDiscards but it can be too many
         * things).  We could add another new stat but at least for now that
         * seems like overkill.
         */
        if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                UDP_INC_STATS(sock_net(sk),
                              UDP_MIB_SNDBUFERRORS, is_udplite);
        }
        return err;

do_confirm:
        if (msg->msg_flags & MSG_PROBE)
                dst_confirm_neigh(&rt->dst, &fl4->daddr);
        if (!(msg->msg_flags&MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
        goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
Here we take getfrag = ip_generic_getfrag as the example (UDP-Lite sockets use udplite_getfrag instead):

getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
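
corkreq decides between the two halves of udp_sendmsg(): with no corking, the datagram is built and sent via the lockless ip_make_skb() fast path; when corked, it takes the lock_sock()/ip_append_data() path. A user-space sketch of the two ways to cork a UDP socket (assuming an fd set up as in the earlier example):

#include <netinet/in.h>
#include <netinet/udp.h>        /* UDP_CORK */
#include <sys/socket.h>

static void demo_corking(int fd, const struct sockaddr_in *dst)
{
        int on = 1, off = 0;

        /* 1. Explicit cork: udp_test_bit(CORK, sk) becomes true, data
         *    accumulates in sk_write_queue until the cork is removed. */
        setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
        sendto(fd, "part1", 5, 0, (const struct sockaddr *)dst, sizeof(*dst));
        sendto(fd, "part2", 5, 0, (const struct sockaddr *)dst, sizeof(*dst));
        setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off)); /* flushes one datagram */

        /* 2. Per-call cork: MSG_MORE sets corkreq for all but the last call. */
        sendto(fd, "partA", 5, MSG_MORE, (const struct sockaddr *)dst, sizeof(*dst));
        sendto(fd, "partB", 5, 0, (const struct sockaddr *)dst, sizeof(*dst));
}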

-> ip_make_skb()

net/ipv4/ip_output.c

struct sk_buff *ip_make_skb(struct sock *sk,
                            struct flowi4 *fl4,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
                            struct ipcm_cookie *ipc, struct rtable **rtp,
                            struct inet_cork *cork, unsigned int flags)
{
        struct sk_buff_head queue;
        int err;

        if (flags & MSG_PROBE)
                return NULL;

        __skb_queue_head_init(&queue);

        cork->flags = 0;
        cork->addr = 0;
        cork->opt = NULL;
        err = ip_setup_cork(sk, cork, ipc, rtp);
        if (err)
                return ERR_PTR(err);

        err = __ip_append_data(sk, fl4, &queue, cork,
                               &current->task_frag, getfrag,
                               from, length, transhdrlen, flags);
        if (err) {
                __ip_flush_pending_frames(sk, &queue, cork);
                return ERR_PTR(err);
        }

        return __ip_make_skb(sk, fl4, &queue, cork);
}

-> __ip_append_data()

net/ipv4/ip_output.c

static int __ip_append_data(struct sock *sk,
                            struct flowi4 *fl4,
                            struct sk_buff_head *queue,
                            struct inet_cork *cork,
                            struct page_frag *pfrag,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
                            unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ubuf_info *uarg = NULL;
        struct sk_buff *skb;
        struct ip_options *opt = cork->opt;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        bool zc = false;
        unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
        int csummode = CHECKSUM_NONE;
        struct rtable *rt = dst_rtable(cork->dst);
        bool paged, hold_tskey, extra_uref = false;
        unsigned int wmem_alloc_delta = 0;
        u32 tskey = 0;

        skb = skb_peek_tail(queue);

        exthdrlen = !skb ? rt->dst.header_len : 0;
        mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
        paged = !!cork->gso_size;

        hh_len = LL_RESERVED_SPACE(rt->dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
        maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;

        if (cork->length + length > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
                               mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we wish
         * it won't be fragmented in the future.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
            (!(flags & MSG_MORE) || cork->gso_size) &&
            (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
                csummode = CHECKSUM_PARTIAL;

        if ((flags & MSG_ZEROCOPY) && length) {
                struct msghdr *msg = from;

                if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
                        if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
                                return -EINVAL;

                        /* Leave uarg NULL if can't zerocopy, callers should
                         * be able to handle it.
                         */
                        if ((rt->dst.dev->features & NETIF_F_SG) &&
                            csummode == CHECKSUM_PARTIAL) {
                                paged = true;
                                zc = true;
                                uarg = msg->msg_ubuf;
                        }
                } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
                        uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
                        if (!uarg)
                                return -ENOBUFS;
                        extra_uref = !skb_zcopy(skb);   /* only ref on new uarg */
                        if (rt->dst.dev->features & NETIF_F_SG &&
                            csummode == CHECKSUM_PARTIAL) {
                                paged = true;
                                zc = true;
                        } else {
                                uarg_to_msgzc(uarg)->zerocopy = 0;
                                skb_zcopy_set(skb, uarg, &extra_uref);
                        }
                }
        } else if ((flags & MSG_SPLICE_PAGES) && length) {
                if (inet_test_bit(HDRINCL, sk))
                        return -EPERM;
                if (rt->dst.dev->features & NETIF_F_SG &&
                    getfrag == ip_generic_getfrag)
                        /* We need an empty buffer to attach stuff to */
                        paged = true;
                else
                        flags &= ~MSG_SPLICE_PAGES;
        }

        cork->length += length;

        hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
                     READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
        if (hold_tskey)
                tskey = atomic_inc_return(&sk->sk_tskey) - 1;

        /* So, what's going on in the loop below?
         *
         * We use calculated fragment length to generate chained skb,
         * each of segments is IP fragment ready for sending to network after
         * adding appropriate IP header.
         */

        if (!skb)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen, alloc_extra;
                        unsigned int pagedlen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;
                        pagedlen = 0;

                        alloc_extra = hh_len + 15;
                        alloc_extra += exthdrlen;

                        /* The last fragment gets additional space at tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea what fragment will be
                         * the last.
                         */
                        if (datalen == length + fraggap)
                                alloc_extra += rt->dst.trailer_len;

                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else if (!paged &&
                                 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
                                  !(rt->dst.dev->features & NETIF_F_SG)))
                                alloclen = fraglen;
                        else {
                                alloclen = fragheaderlen + transhdrlen;
                                pagedlen = datalen - transhdrlen;
                        }

                        alloclen += alloc_extra;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk, alloclen,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
                                    2 * sk->sk_sndbuf)
                                        skb = alloc_skb(alloclen,
                                                        sk->sk_allocation);
                                if (unlikely(!skb))
                                        err = -ENOBUFS;
                        }
                        if (!skb)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen + exthdrlen - pagedlen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        data += fragheaderlen + exthdrlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap - pagedlen;
                        /* [!] NOTE: copy will be negative if pagedlen>0
                         * because then the equation reduces to -fraggap.
                         */
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        } else if (flags & MSG_SPLICE_PAGES) {
                                copy = 0;
                        }

                        offset += copy;
                        length -= copy + transhdrlen;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /* only the initial fragment is time stamped */
                        skb_shinfo(skb)->tx_flags = cork->tx_flags;
                        cork->tx_flags = 0;
                        skb_shinfo(skb)->tskey = tskey;
                        tskey = 0;
                        skb_zcopy_set(skb, uarg, &extra_uref);

                        if ((flags & MSG_CONFIRM) && !skb_prev)
                                skb_set_dst_pending_confirm(skb, 1);

                        /*
                         * Put the packet on the pending queue.
                         */
                        if (!skb->destructor) {
                                skb->destructor = sock_wfree;
                                skb->sk = sk;
                                wmem_alloc_delta += skb->truesize;
                        }
                        __skb_queue_tail(queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->dst.dev->features&NETIF_F_SG) &&
                    skb_tailroom(skb) >= copy) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else if (flags & MSG_SPLICE_PAGES) {
                        struct msghdr *msg = from;

                        err = -EIO;
                        if (WARN_ON_ONCE(copy > msg->msg_iter.count))
                                goto error;

                        err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
                                                   sk->sk_allocation);
                        if (err < 0)
                                goto error;
                        copy = err;
                        wmem_alloc_delta += copy;
                } else if (!zc) {
                        int i = skb_shinfo(skb)->nr_frags;

                        err = -ENOMEM;
                        if (!sk_page_frag_refill(sk, pfrag))
                                goto error;

                        skb_zcopy_downgrade_managed(skb);
                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
                                err = -EMSGSIZE;
                                if (i == MAX_SKB_FRAGS)
                                        goto error;

                                __skb_fill_page_desc(skb, i, pfrag->page,
                                                     pfrag->offset, 0);
                                skb_shinfo(skb)->nr_frags = ++i;
                                get_page(pfrag->page);
                        }
                        copy = min_t(int, copy, pfrag->size - pfrag->offset);
                        if (getfrag(from,
                                    page_address(pfrag->page) + pfrag->offset,
                                    offset, copy, skb->len, skb) < 0)
                                goto error_efault;

                        pfrag->offset += copy;
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                        skb_len_add(skb, copy);
                        wmem_alloc_delta += copy;
                } else {
                        err = skb_zerocopy_iter_dgram(skb, from, copy);
                        if (err < 0)
                                goto error;
                }
                offset += copy;
                length -= copy;
        }

        if (wmem_alloc_delta)
                refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
        return 0;

error_efault:
        err = -EFAULT;
error:
        net_zcopy_put_abort(uarg, extra_uref);
        cork->length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
        if (hold_tskey)
                atomic_dec(&sk->sk_tskey);
        return err;
}static int __ip_append_data(struct sock *sk,
                            struct flowi4 *fl4,
                            struct sk_buff_head *queue,
                            struct inet_cork *cork,
                            struct page_frag *pfrag,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
                            unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ubuf_info *uarg = NULL;
        struct sk_buff *skb;
        struct ip_options *opt = cork->opt;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        bool zc = false;
        unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
        int csummode = CHECKSUM_NONE;
        struct rtable *rt = dst_rtable(cork->dst);
        bool paged, hold_tskey, extra_uref = false;
        unsigned int wmem_alloc_delta = 0;
        u32 tskey = 0;

        skb = skb_peek_tail(queue);

        exthdrlen = !skb ? rt->dst.header_len : 0;
        mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
        paged = !!cork->gso_size;

        hh_len = LL_RESERVED_SPACE(rt->dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
        maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;

        if (cork->length + length > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
                               mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we wish
         * it won't be fragmented in the future.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
            (!(flags & MSG_MORE) || cork->gso_size) &&
            (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
                csummode = CHECKSUM_PARTIAL;

        if ((flags & MSG_ZEROCOPY) && length) {
                struct msghdr *msg = from;

                if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
                        if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
                                return -EINVAL;

                        /* Leave uarg NULL if can't zerocopy, callers should
                         * be able to handle it.
                         */
                        if ((rt->dst.dev->features & NETIF_F_SG) &&
                            csummode == CHECKSUM_PARTIAL) {
                                paged = true;
                                zc = true;
                                uarg = msg->msg_ubuf;
                        }
                } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
                        uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
                        if (!uarg)
                                return -ENOBUFS;
                        extra_uref = !skb_zcopy(skb);   /* only ref on new uarg */
                        if (rt->dst.dev->features & NETIF_F_SG &&
                            csummode == CHECKSUM_PARTIAL) {
                                paged = true;
                                zc = true;
                        } else {
                                uarg_to_msgzc(uarg)->zerocopy = 0;
                                skb_zcopy_set(skb, uarg, &extra_uref);
                        }
                }
        } else if ((flags & MSG_SPLICE_PAGES) && length) {
                if (inet_test_bit(HDRINCL, sk))
                        return -EPERM;
                if (rt->dst.dev->features & NETIF_F_SG &&
                    getfrag == ip_generic_getfrag)
                        /* We need an empty buffer to attach stuff to */
                        paged = true;
                else
                        flags &= ~MSG_SPLICE_PAGES;
        }

        cork->length += length;

        hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
                     READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
        if (hold_tskey)
                tskey = atomic_inc_return(&sk->sk_tskey) - 1;

        /* So, what's going on in the loop below?
         *
         * We use calculated fragment length to generate chained skb,
         * each of segments is IP fragment ready for sending to network after
         * adding appropriate IP header.
         */

        if (!skb)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen, alloc_extra;
                        unsigned int pagedlen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;
                        pagedlen = 0;

                        alloc_extra = hh_len + 15;
                        alloc_extra += exthdrlen;

                        /* The last fragment gets additional space at tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea what fragment will be
                         * the last.
                         */
                        if (datalen == length + fraggap)
                                alloc_extra += rt->dst.trailer_len;

                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else if (!paged &&
                                 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
                                  !(rt->dst.dev->features & NETIF_F_SG)))
                                alloclen = fraglen;
                        else {
                                alloclen = fragheaderlen + transhdrlen;
                                pagedlen = datalen - transhdrlen;
                        }

                        alloclen += alloc_extra;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk, alloclen,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
                                    2 * sk->sk_sndbuf)
                                        skb = alloc_skb(alloclen,
                                                        sk->sk_allocation);
                                if (unlikely(!skb))
                                        err = -ENOBUFS;
                        }
                        if (!skb)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen + exthdrlen - pagedlen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        data += fragheaderlen + exthdrlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap - pagedlen;
                        /* [!] NOTE: copy will be negative if pagedlen>0
                         * because then the equation reduces to -fraggap.
                         */
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        } else if (flags & MSG_SPLICE_PAGES) {
                                copy = 0;
                        }

                        offset += copy;
                        length -= copy + transhdrlen;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /* only the initial fragment is time stamped */
                        skb_shinfo(skb)->tx_flags = cork->tx_flags;
                        cork->tx_flags = 0;
                        skb_shinfo(skb)->tskey = tskey;
                        tskey = 0;
                        skb_zcopy_set(skb, uarg, &extra_uref);

                        if ((flags & MSG_CONFIRM) && !skb_prev)
                                skb_set_dst_pending_confirm(skb, 1);

                        /*
                         * Put the packet on the pending queue.
                         */
                        if (!skb->destructor) {
                                skb->destructor = sock_wfree;
                                skb->sk = sk;
                                wmem_alloc_delta += skb->truesize;
                        }
                        __skb_queue_tail(queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->dst.dev->features&NETIF_F_SG) &&
                    skb_tailroom(skb) >= copy) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else if (flags & MSG_SPLICE_PAGES) {
                        struct msghdr *msg = from;

                        err = -EIO;
                        if (WARN_ON_ONCE(copy > msg->msg_iter.count))
                                goto error;

                        err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
                                                   sk->sk_allocation);
                        if (err < 0)
                                goto error;
                        copy = err;
                        wmem_alloc_delta += copy;
                } else if (!zc) {
                        int i = skb_shinfo(skb)->nr_frags;

                        err = -ENOMEM;
                        if (!sk_page_frag_refill(sk, pfrag))
                                goto error;

                        skb_zcopy_downgrade_managed(skb);
                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
                                err = -EMSGSIZE;
                                if (i == MAX_SKB_FRAGS)
                                        goto error;

                                __skb_fill_page_desc(skb, i, pfrag->page,
                                                     pfrag->offset, 0);
                                skb_shinfo(skb)->nr_frags = ++i;
                                get_page(pfrag->page);
                        }
                        copy = min_t(int, copy, pfrag->size - pfrag->offset);
                        if (getfrag(from,
                                    page_address(pfrag->page) + pfrag->offset,
                                    offset, copy, skb->len, skb) < 0)
                                goto error_efault;

                        pfrag->offset += copy;
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                        skb_len_add(skb, copy);
                        wmem_alloc_delta += copy;
                } else {
                        err = skb_zerocopy_iter_dgram(skb, from, copy);
                        if (err < 0)
                                goto error;
                }
                offset += copy;
                length -= copy;
        }

        if (wmem_alloc_delta)
                refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
        return 0;

error_efault:
        err = -EFAULT;
error:
        net_zcopy_put_abort(uarg, extra_uref);
        cork->length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
        if (hold_tskey)
                atomic_dec(&sk->sk_tskey);
        return err;
}
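
The code above shows that every non-zerocopy branch delegates the actual user-to-kernel copy to the getfrag callback. For plain UDP, udp_sendmsg() passes ip_generic_getfrag() as that callback (UDP-Lite uses udplite_getfrag() instead), which is the next stop in the trace. For illustration, a minimal user-space sender like the one below (the loopback address and port are arbitrary examples, not from the original article) drives this entire chain with a single blocking sendto():

/* Minimal illustrative UDP sender; destination is an arbitrary example. */
#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port   = htons(9999),      /* example port */
        };
        const char payload[] = "hello, udp";
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

        /* sendto() -> __sys_sendto() -> ... -> __ip_append_data() */
        if (sendto(fd, payload, sizeof(payload) - 1, 0,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0)
                perror("sendto");
        close(fd);
        return 0;
}

Assuming bpftrace is available, one way to confirm the callback fires is: bpftrace -e 'kprobe:ip_generic_getfrag { printf("len=%d\n", arg3); }'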

--------》 ip_generic_getfrag()

net / ipv4 / ip_output.c

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct msghdr *msg = from;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (!copy_from_iter_full(to, len, &msg->msg_iter))
                        return -EFAULT;
        } else {
                __wsum csum = 0;
                if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
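
Two things are worth noting in ip_generic_getfrag(). First, when the device will compute the checksum (CHECKSUM_PARTIAL), a plain copy_from_iter_full() is enough; otherwise the checksum is computed during the copy itself, saving a second pass over the payload. Second, csum_block_add() takes the block's starting offset (odd) because the Internet checksum runs over 16-bit words: a block that starts at an odd byte offset contributes its bytes in swapped lanes, so its partial sum must be byte-swapped before being folded in. The stand-alone sketch below (toy code written for this article, not kernel source) demonstrates that property:

/* Toy demo: checksum a buffer whole vs. as two blocks, where the
 * second block starts at odd offset 3. Its partial sum must be
 * byte-swapped before combining -- the job csum_block_add() does
 * when 'odd' is set. */
#include <stdint.h>
#include <stdio.h>

static uint32_t sum16(const uint8_t *buf, size_t len, uint32_t sum)
{
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)buf[i] << 8 | buf[i + 1];
        if (len & 1)
                sum += (uint32_t)buf[len - 1] << 8;   /* pad odd tail */
        return sum;
}

static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

int main(void)
{
        const uint8_t data[] = "abcdefg";             /* 7 bytes */
        uint16_t whole = fold(sum16(data, 7, 0));
        uint32_t a = sum16(data, 3, 0);               /* bytes 0..2  */
        uint16_t b = fold(sum16(data + 3, 4, 0));     /* odd offset 3 */
        uint16_t b_swapped = (uint16_t)((b >> 8) | (b << 8));
        uint16_t split = fold(a + b_swapped);

        printf("whole=%04x split=%04x\n", whole, split);  /* equal */
        return 0;
}

The byte swap is legal because ones'-complement addition is commutative and swap distributes over it, which is exactly why the kernel can checksum each fragment independently and merge the results.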

--------》 csum_and_copy_from_iter_full() ----》 copy_from_user_iter_csum()

net / core / skbuff.c

bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
                                  __wsum *csum, struct iov_iter *i)
{
        size_t copied;

        if (WARN_ON_ONCE(!i->data_source))
                return false;
        copied = iterate_and_advance2(i, bytes, addr, csum,
                                      copy_from_user_iter_csum,
                                      memcpy_from_iter_csum);
        if (likely(copied == bytes))
                return true;
        iov_iter_revert(i, copied);
        return false;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
static __always_inline
size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
                                size_t len, void *to, void *priv2)
{
        __wsum next, *csum = priv2;

        next = csum_and_copy_from_user(iter_from, to + progress, len);
        *csum = csum_block_add(*csum, next, progress);
        return next ? 0 : len;
}
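
csum_and_copy_from_iter_full() walks the iov_iter segment by segment: user-backed segments go through copy_from_user_iter_csum(), kernel-backed ones through memcpy_from_iter_csum(). The step callback receives the running progress (bytes already copied, which also serves as the checksum block offset) and must return how many bytes it failed to process -- hence the all-or-nothing return next ? 0 : len; per segment. A simplified model of that contract (an assumption-level sketch, not the kernel's actual iterate_and_advance2()) looks like this:

/* Sketch of the step-function contract: the walker feeds each segment
 * to the callback with the running 'progress'; the callback returns
 * the number of bytes it FAILED to handle, so any nonzero return ends
 * the walk early. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef size_t (*step_fn)(void *seg_base, size_t progress, size_t len,
                          void *to, void *priv2);

struct segment { void *base; size_t len; };

static size_t walk_segments(struct segment *segs, size_t nsegs, size_t bytes,
                            void *to, void *priv2, step_fn step)
{
        size_t progress = 0;
        size_t i;

        for (i = 0; i < nsegs && progress < bytes; i++) {
                size_t len = segs[i].len;
                size_t remain;

                if (len > bytes - progress)
                        len = bytes - progress;
                remain = step(segs[i].base, progress, len, to, priv2);
                progress += len - remain;
                if (remain)             /* partial step: stop early */
                        break;
        }
        return progress;                /* caller compares with 'bytes' */
}

/* Demo step: copy 'len' bytes to to+progress and report 0 bytes failed. */
static size_t copy_step(void *seg, size_t progress, size_t len,
                        void *to, void *priv2)
{
        (void)priv2;
        memcpy((char *)to + progress, seg, len);
        return 0;
}

int main(void)
{
        char out[16] = { 0 };
        struct segment segs[] = {
                { (void *)"hello ", 6 },
                { (void *)"world",  5 },
        };
        size_t done = walk_segments(segs, 2, 11, out, NULL, copy_step);

        printf("%zu bytes: %s\n", done, out);   /* 11 bytes: hello world */
        return 0;
}

On a shortfall, csum_and_copy_from_iter_full() compares the returned progress against bytes and calls iov_iter_revert(), so a partially consumed iterator is rolled back and the whole copy reports failure.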

--------》 csum_and_copy_from_user () ----》 copy_from_user()

include / net / checksum.h


#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static __always_inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
                                      int len)
{
        if (copy_from_user(dst, src, len))
                return 0;
        return csum_partial(dst, len, ~0U);
}
#endif
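
The trace bottoms out here: the generic fallback simply does copy_from_user() and then checksums the fresh kernel copy in a second pass, while architectures that define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER (x86, for example) substitute a fused copy-and-checksum routine. Note the ~0U seed passed to csum_partial(): starting from all ones, the ones'-complement sum never folds to 0, so a return value of 0 unambiguously means "the user access faulted" -- the convention that copy_from_user_iter_csum() relies on with next ? 0 : len. A toy 16-bit check of that invariant (illustrative only; csum_partial() actually accumulates in 32 bits):

/* Toy check: end-around-carry addition from an all-ones seed never
 * produces 0, so 0 can be reserved as the fault marker. */
#include <stdint.h>
#include <stdio.h>

static uint16_t add1c(uint16_t a, uint16_t b)
{
        uint32_t s = (uint32_t)a + b;

        return (uint16_t)((s & 0xffff) + (s >> 16)); /* end-around carry */
}

int main(void)
{
        int zeros = 0;
        uint32_t w;

        for (w = 0; w <= 0xffff; w++)
                if (add1c(0xffff, (uint16_t)w) == 0)
                        zeros++;
        printf("sums that folded to 0: %d\n", zeros); /* prints 0 */
        return 0;
}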
