/*
 * udevd daemon entry point.
 *
 * Startup sequence: parse command-line options, acquire the udev control
 * socket and the kernel-uevent netlink socket (taken over from systemd if
 * systemd_fds() succeeds, otherwise newly created), create the queue export
 * file, optionally daemonize, block all signals and route them through a
 * signalfd, create a socketpair for worker->main notifications, load the
 * rules, register every fd with one epoll instance, then loop dispatching
 * events to forked workers until udev_exit is set by a signal handler.
 *
 * Exit codes: EXIT_SUCCESS on clean shutdown; otherwise 1 (control
 * socket / generic), 3 (netlink), 4 (fork/inotify), 5 (signalfd),
 * 6 (socketpair).
 */
int main(int argc, char *argv[])
{
struct udev *udev;
sigset_t mask;
int daemonize = false;
int resolve_names = 1; /* 1=early, 0=late, -1=never (set by --resolve-names below) */
static const struct option options[] = {
{ "daemon", no_argument, NULL, 'd' },
{ "debug", no_argument, NULL, 'D' },
{ "children-max", required_argument, NULL, 'c' },
{ "exec-delay", required_argument, NULL, 'e' },
{ "resolve-names", required_argument, NULL, 'N' },
{ "help", no_argument, NULL, 'h' },
{ "version", no_argument, NULL, 'V' },
{}
};
int fd_ctrl = -1;
int fd_netlink = -1;
int fd_worker = -1;
struct epoll_event ep_ctrl, ep_inotify, ep_signal, ep_netlink, ep_worker;
struct udev_ctrl_connection *ctrl_conn = NULL;
int rc = 1;
udev = udev_new();
if (udev == NULL)
goto exit;
/* set up logging before anything below that may log */
log_set_target(LOG_TARGET_AUTO);
log_parse_environment();
log_open();
udev_set_log_fn(udev, udev_main_log);
log_debug("version %s\n", VERSION);
label_init("/dev");
/* command-line option parsing */
for (;;) {
int option;
option = getopt_long(argc, argv, "c:deDtN:hV", options, NULL);
if (option == -1)
break;
switch (option) {
case 'd':
daemonize = true;
break;
case 'c':
children_max = strtoul(optarg, NULL, 0);
break;
case 'e':
exec_delay = strtoul(optarg, NULL, 0);
break;
case 'D':
debug = true;
log_set_max_level(LOG_DEBUG);
udev_set_log_priority(udev, LOG_DEBUG);
break;
case 'N':
if (strcmp (optarg, "early") == 0) {
resolve_names = 1;
} else if (strcmp (optarg, "late") == 0) {
resolve_names = 0;
} else if (strcmp (optarg, "never") == 0) {
resolve_names = -1;
} else {
fprintf(stderr, "resolve-names must be early, late or never\n");
log_error("resolve-names must be early, late or never\n");
goto exit;
}
break;
case 'h':
printf("Usage: udevd OPTIONS\n"
" --daemon\n"
" --debug\n"
" --children-max=<maximum number of workers>\n"
" --exec-delay=<seconds to wait before executing RUN=>\n"
" --resolve-names=early|late|never\n"
" --version\n"
" --help\n"
"\n");
goto exit;
case 'V':
printf("%s\n", VERSION);
goto exit;
default:
goto exit;
}
}
/* NOTE(review): presumably merges udev.* options from /proc/cmdline -- confirm */
kernel_cmdline_options(udev);
if (getuid() != 0) {
fprintf(stderr, "root privileges required\n");
log_error("root privileges required\n");
goto exit;
}
/* set umask before creating any file/directory */
chdir("/");
umask(022);
mkdir("/run/udev", 0755);
dev_setup(NULL);
static_dev_create_from_modules(udev);
/* before opening new files, make sure std{in,out,err} fds are in a sane state */
if (daemonize) {
int fd;
fd = open("/dev/null", O_RDWR);
if (fd >= 0) {
/* a zero-length write probes whether the fd is open/writable;
 * dead std fds are replaced by /dev/null */
if (write(STDOUT_FILENO, 0, 0) < 0)
dup2(fd, STDOUT_FILENO);
if (write(STDERR_FILENO, 0, 0) < 0)
dup2(fd, STDERR_FILENO);
if (fd > STDERR_FILENO)
close(fd);
} else {
fprintf(stderr, "cannot open /dev/null\n");
log_error("cannot open /dev/null\n");
}
}
if (systemd_fds(udev, &fd_ctrl, &fd_netlink) >= 0) {
/* get control and netlink socket from systemd */
udev_ctrl = udev_ctrl_new_from_fd(udev, fd_ctrl);
if (udev_ctrl == NULL) {
log_error("error taking over udev control socket");
rc = 1;
goto exit;
}
monitor = udev_monitor_new_from_netlink_fd(udev, "kernel", fd_netlink);
if (monitor == NULL) {
log_error("error taking over netlink socket\n");
rc = 3;
goto exit;
}
/* get our own cgroup, we regularly kill everything udev has left behind */
if (cg_get_by_pid(SYSTEMD_CGROUP_CONTROLLER, 0, &udev_cgroup) < 0)
udev_cgroup = NULL;
} else {
/* open control and netlink socket */
udev_ctrl = udev_ctrl_new(udev);
if (udev_ctrl == NULL) {
fprintf(stderr, "error initializing udev control socket");
log_error("error initializing udev control socket");
rc = 1;
goto exit;
}
fd_ctrl = udev_ctrl_get_fd(udev_ctrl);
monitor = udev_monitor_new_from_netlink(udev, "kernel");
if (monitor == NULL) {
fprintf(stderr, "error initializing netlink socket\n");
log_error("error initializing netlink socket\n");
rc = 3;
goto exit;
}
fd_netlink = udev_monitor_get_fd(monitor);
}
if (udev_monitor_enable_receiving(monitor) < 0) {
fprintf(stderr, "error binding netlink socket\n");
log_error("error binding netlink socket\n");
rc = 3;
goto exit;
}
if (udev_ctrl_enable_receiving(udev_ctrl) < 0) {
fprintf(stderr, "error binding udev control socket\n");
log_error("error binding udev control socket\n");
rc = 1;
goto exit;
}
/* 128 MB receive buffer -- NOTE(review): presumably sized to survive uevent bursts */
udev_monitor_set_receive_buffer_size(monitor, 128*1024*1024);
/* create queue file before signalling 'ready', to make sure we block 'settle' */
udev_queue_export = udev_queue_export_new(udev);
if (udev_queue_export == NULL) {
log_error("error creating queue file\n");
goto exit;
}
if (daemonize) {
pid_t pid;
pid = fork();
switch (pid) {
case 0:
/* child continues as the daemon */
break;
case -1:
log_error("fork of daemon failed: %m\n");
rc = 4;
goto exit;
default:
/* parent: report success and skip queue/ctrl cleanup (the child owns them) */
rc = EXIT_SUCCESS;
goto exit_daemonize;
}
setsid();
/* -1000 = minimum badness: exempt the daemon from the OOM killer */
write_one_line_file("/proc/self/oom_score_adj", "-1000");
} else {
/* not forking: tell the service manager we are ready */
sd_notify(1, "READY=1");
}
print_kmsg("starting version " VERSION "\n");
/* detach std fds from the console unless debugging */
if (!debug) {
int fd;
fd = open("/dev/null", O_RDWR);
if (fd >= 0) {
dup2(fd, STDIN_FILENO);
dup2(fd, STDOUT_FILENO);
dup2(fd, STDERR_FILENO);
close(fd);
}
}
fd_inotify = udev_watch_init(udev);
if (fd_inotify < 0) {
fprintf(stderr, "error initializing inotify\n");
log_error("error initializing inotify\n");
rc = 4;
goto exit;
}
udev_watch_restore(udev);
/* block and listen to all signals on signalfd */
sigfillset(&mask);
sigprocmask(SIG_SETMASK, &mask, &sigmask_orig);
fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
if (fd_signal < 0) {
fprintf(stderr, "error creating signalfd\n");
log_error("error creating signalfd\n");
rc = 5;
goto exit;
}
/* unnamed socket from workers to the main daemon */
if (socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, worker_watch) < 0) {
fprintf(stderr, "error creating socketpair\n");
log_error("error creating socketpair\n");
rc = 6;
goto exit;
}
fd_worker = worker_watch[READ_END];
udev_builtin_init(udev);
rules = udev_rules_new(udev, resolve_names);
if (rules == NULL) {
log_error("error reading rules\n");
goto exit;
}
/* register all five event sources with a single epoll instance */
memset(&ep_ctrl, 0, sizeof(struct epoll_event));
ep_ctrl.events = EPOLLIN;
ep_ctrl.data.fd = fd_ctrl;
memset(&ep_inotify, 0, sizeof(struct epoll_event));
ep_inotify.events = EPOLLIN;
ep_inotify.data.fd = fd_inotify;
memset(&ep_signal, 0, sizeof(struct epoll_event));
ep_signal.events = EPOLLIN;
ep_signal.data.fd = fd_signal;
memset(&ep_netlink, 0, sizeof(struct epoll_event));
ep_netlink.events = EPOLLIN;
ep_netlink.data.fd = fd_netlink;
memset(&ep_worker, 0, sizeof(struct epoll_event));
ep_worker.events = EPOLLIN;
ep_worker.data.fd = fd_worker;
fd_ep = epoll_create1(EPOLL_CLOEXEC);
if (fd_ep < 0) {
log_error("error creating epoll fd: %m\n");
goto exit;
}
if (epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_ctrl, &ep_ctrl) < 0 ||
epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_inotify, &ep_inotify) < 0 ||
epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal) < 0 ||
epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_netlink, &ep_netlink) < 0 ||
epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_worker, &ep_worker) < 0) {
log_error("fail to add fds to epoll: %m\n");
goto exit;
}
/* if needed, convert old database from earlier udev version */
convert_db(udev);
if (children_max <= 0) {
int memsize = mem_size_mb();
/* set value depending on the amount of RAM */
if (memsize > 0)
children_max = 16 + (memsize / 8);
else
children_max = 16;
}
log_debug("set children_max to %u\n", children_max);
udev_rules_apply_static_dev_perms(rules);
udev_list_node_init(&event_list);
udev_list_node_init(&worker_list);
/* main event loop */
for (;;) {
static unsigned long long last_usec;
struct epoll_event ev[8];
int fdcount;
int timeout;
bool is_worker, is_signal, is_inotify, is_netlink, is_ctrl;
int i;
if (udev_exit) {
/* close sources of new events and discard buffered events */
if (fd_ctrl >= 0) {
epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_ctrl, NULL);
fd_ctrl = -1;
}
if (monitor != NULL) {
epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_netlink, NULL);
udev_monitor_unref(monitor);
monitor = NULL;
}
if (fd_inotify >= 0) {
epoll_ctl(fd_ep, EPOLL_CTL_DEL, fd_inotify, NULL);
close(fd_inotify);
fd_inotify = -1;
}
/* discard queued events and kill workers */
event_queue_cleanup(udev, EVENT_QUEUED);
worker_kill(udev);
/* exit after all has cleaned up */
if (udev_list_node_is_empty(&event_list) && udev_list_node_is_empty(&worker_list))
break;
/* timeout at exit for workers to finish */
timeout = 30 * 1000;
} else if (udev_list_node_is_empty(&event_list) && !children) {
/* we are idle */
timeout = -1;
/* cleanup possible left-over processes in our cgroup */
if (udev_cgroup)
cg_kill(SYSTEMD_CGROUP_CONTROLLER, udev_cgroup, SIGKILL, false, true, NULL);
} else {
/* kill idle or hanging workers */
timeout = 3 * 1000;
}
fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), timeout);
if (fdcount < 0)
continue; /* interrupted wait; just retry */
if (fdcount == 0) {
struct udev_list_node *loop;
/* timeout */
if (udev_exit) {
log_error("timeout, giving up waiting for workers to finish\n");
break;
}
/* kill idle workers */
if (udev_list_node_is_empty(&event_list)) {
log_debug("cleanup idle workers\n");
worker_kill(udev);
}
/* check for hanging events: a worker stuck for >30s is killed */
udev_list_node_foreach(loop, &worker_list) {
struct worker *worker = node_to_worker(loop);
if (worker->state != WORKER_RUNNING)
continue;
if ((now_usec() - worker->event_start_usec) > 30 * 1000 * 1000) {
log_error("worker [%u] %s timeout; kill it\n", worker->pid,
worker->event ? worker->event->devpath : "<idle>");
kill(worker->pid, SIGKILL);
worker->state = WORKER_KILLED;
/* drop reference taken for state 'running' */
worker_unref(worker);
if (worker->event) {
log_error("seq %llu '%s' killed\n",
udev_device_get_seqnum(worker->event->dev), worker->event->devpath);
worker->event->exitcode = -64;
event_queue_delete(worker->event, true);
worker->event = NULL;
}
}
}
}
/* classify which fds fired this round */
is_worker = is_signal = is_inotify = is_netlink = is_ctrl = false;
for (i = 0; i < fdcount; i++) {
if (ev[i].data.fd == fd_worker && ev[i].events & EPOLLIN)
is_worker = true;
else if (ev[i].data.fd == fd_netlink && ev[i].events & EPOLLIN)
is_netlink = true;
else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN)
is_signal = true;
else if (ev[i].data.fd == fd_inotify && ev[i].events & EPOLLIN)
is_inotify = true;
else if (ev[i].data.fd == fd_ctrl && ev[i].events & EPOLLIN)
is_ctrl = true;
}
/* check for changed config, every 3 seconds at most */
if ((now_usec() - last_usec) > 3 * 1000 * 1000) {
if (udev_rules_check_timestamp(rules))
reload = true;
if (udev_builtin_validate(udev))
reload = true;
last_usec = now_usec();
}
/* reload requested, HUP signal received, rules changed, builtin changed */
if (reload) {
worker_kill(udev);
rules = udev_rules_unref(rules);
udev_builtin_exit(udev);
reload = false;
}
/* event has finished */
if (is_worker)
worker_returned(fd_worker);
if (is_netlink) {
struct udev_device *dev;
dev = udev_monitor_receive_device(monitor);
if (dev != NULL) {
udev_device_set_usec_initialized(dev, now_usec());
if (event_queue_insert(dev) < 0)
udev_device_unref(dev); /* queueing failed: drop our reference */
}
}
/* start new events; rules are lazily re-created after a reload */
if (!udev_list_node_is_empty(&event_list) && !udev_exit && !stop_exec_queue) {
udev_builtin_init(udev);
if (rules == NULL)
rules = udev_rules_new(udev, resolve_names);
if (rules != NULL)
event_queue_start(udev);
}
if (is_signal) {
struct signalfd_siginfo fdsi;
ssize_t size;
size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
if (size == sizeof(struct signalfd_siginfo))
handle_signal(udev, fdsi.ssi_signo);
}
/* we are shutting down, the events below are not handled anymore */
if (udev_exit)
continue;
/* device node watch */
if (is_inotify)
handle_inotify(udev);
/*
 * This needs to be after the inotify handling, to make sure,
 * that the ping is send back after the possibly generated
 * "change" events by the inotify device node watch.
 *
 * A single time we may receive a client connection which we need to
 * keep open to block the client. It will be closed right before we
 * exit.
 */
if (is_ctrl)
ctrl_conn = handle_ctrl_msg(udev_ctrl);
}
rc = EXIT_SUCCESS;
/* full teardown; the daemonized parent jumps past the two cleanups
 * below (exit_daemonize) so the child keeps queue file and control socket */
exit:
udev_queue_export_cleanup(udev_queue_export);
udev_ctrl_cleanup(udev_ctrl);
exit_daemonize:
if (fd_ep >= 0)
close(fd_ep);
worker_list_cleanup(udev);
event_queue_cleanup(udev, EVENT_UNDEF);
udev_rules_unref(rules);
udev_builtin_exit(udev);
if (fd_signal >= 0)
close(fd_signal);
if (worker_watch[READ_END] >= 0)
close(worker_watch[READ_END]);
if (worker_watch[WRITE_END] >= 0)
close(worker_watch[WRITE_END]);
udev_monitor_unref(monitor);
udev_queue_export_unref(udev_queue_export);
udev_ctrl_connection_unref(ctrl_conn);
udev_ctrl_unref(udev_ctrl);
label_finish();
udev_unref(udev);
log_close();
return rc;
}
/*
 * Library context object. Carries the logging hook installed via
 * udev_set_log_fn() and the priority set via udev_set_log_priority(),
 * plus caller userdata and a property list.
 */
struct udev {
int refcount; /* reference count -- NOTE(review): presumably freed at zero by udev_unref(); confirm */
void (*log_fn)(struct udev *udev, int priority, const char *file, int line, const char *fn,
const char *format, va_list args); /* log callback (see udev_set_log_fn() in main()) */
void *userdata; /* opaque pointer owned by the library user */
struct udev_list properties_list; /* properties -- NOTE(review): presumably from udev.conf; verify */
int log_priority; /* current log priority (see udev_set_log_priority() in main()) */
};
/*
 * Execute all RUN= entries collected for an event.
 *
 * Each list entry is either a built-in command (dispatched via
 * udev_builtin_run()) or an external program (spawned via
 * udev_event_spawn() with the device properties as environment).
 *
 * Fix: the original logged and slept on 'program' BEFORE
 * udev_event_apply_format() had written anything into it, i.e. it read an
 * uninitialized stack buffer (undefined behavior, garbage in the debug
 * log). The format expansion is now done first, so the exec_delay log line
 * shows the actual command.
 *
 * event:   the udev event whose run_list is processed
 * sigmask: signal mask passed through to udev_event_spawn()
 */
void udev_event_execute_run(struct udev_event *event, const sigset_t *sigmask)
{
        struct udev_list_entry *list_entry;

        udev_list_entry_foreach(list_entry, udev_list_get_entry(&event->run_list)) {
                const char *cmd = udev_list_entry_get_name(list_entry);
                enum udev_builtin_cmd builtin_cmd = udev_list_entry_get_num(list_entry);

                if (builtin_cmd < UDEV_BUILTIN_MAX) {
                        char command[UTIL_PATH_SIZE];

                        udev_event_apply_format(event, cmd, command, sizeof(command));
                        udev_builtin_run(event->dev, builtin_cmd, command, false);
                } else {
                        char program[UTIL_PATH_SIZE];
                        char **envp;

                        /* expand format specifiers before 'program' is used anywhere */
                        udev_event_apply_format(event, cmd, program, sizeof(program));

                        if (event->exec_delay > 0) {
                                log_debug("delay execution of '%s'\n", program);
                                sleep(event->exec_delay);
                        }

                        envp = udev_device_get_properties_envp(event->dev);
                        udev_event_spawn(event, program, envp, sigmask, NULL, 0);
                }
        }
}
//main()@src/udev/udevd.c
int main(int argc, char *argv[])
+-- udev = udev_new();
...
+-- dev_setup(NULL);
+-- static_dev_create_from_modules(udev);
...
+-- udev_ctrl = udev_ctrl_new(udev);
+-- fd_ctrl = udev_ctrl_get_fd(udev_ctrl);
+-- monitor = udev_monitor_new_from_netlink(udev, "kernel");
+-- fd_netlink = udev_monitor_get_fd(monitor);
...
+-- udev_monitor_enable_receiving(monitor)
+-- udev_ctrl_enable_receiving(udev_ctrl)
...
+-- udev_monitor_set_receive_buffer_size(monitor, 128*1024*1024);
+-- udev_queue_export = udev_queue_export_new(udev);
...
+-- fd_ep = epoll_create1(EPOLL_CLOEXEC);
+-- epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_ctrl, &ep_ctrl)
+-- epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_inotify, &ep_inotify)
+-- epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal)
+-- epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_netlink, &ep_netlink)
+-- epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_worker, &ep_worker)
...
+-- for(;;) {
...
if (!udev_list_node_is_empty(&event_list) && !udev_exit && !stop_exec_queue) {
udev_builtin_init(udev);
if (rules == NULL)
rules = udev_rules_new(udev, resolve_names);
if (rules != NULL)
event_queue_start(udev); //event_queue_start(udev)...
}
...
}
static void
event_queue_start(struct udev *udev)
+-- udev_list_node_foreach(loop, &event_list) {
struct event *event = node_to_event(loop);
event_run(event); //遍历event_list,调用event_run(event)
/* +-- udev_list_node_foreach(loop, &worker_list) {
struct worker *worker = node_to_worker(loop);
count = udev_monitor_send_device(monitor, worker->monitor, event->dev);
worker_ref(worker);
worker->event = event;
worker->state = WORKER_RUNNING;
worker->event_start_usec = now_usec();
event->state = EVENT_RUNNING;
return;
}
+-- worker_new(event); */
}
static void
worker_new(struct event *event)
+-- worker_monitor = udev_monitor_new_from_netlink(udev, NULL);
+-- udev_monitor_allow_unicast_sender(worker_monitor, monitor);
+-- udev_monitor_enable_receiving(worker_monitor);
+-- worker = calloc(1, sizeof(struct worker));
+-- worker->refcount = 2;
+-- worker->udev = udev;
+-- pid = fork();
switch (pid) {
case 0: { //子进程
...
dev = event->dev;
...
fd_ep = epoll_create1(EPOLL_CLOEXEC);
...
memset(&ep_signal, 0, sizeof(struct epoll_event));
ep_signal.events = EPOLLIN;
ep_signal.data.fd = fd_signal;
...
fd_monitor = udev_monitor_get_fd(worker_monitor);
memset(&ep_monitor, 0, sizeof(struct epoll_event));
ep_monitor.events = EPOLLIN;
ep_monitor.data.fd = fd_monitor;
...
epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_signal, &ep_signal)
epoll_ctl(fd_ep, EPOLL_CTL_ADD, fd_monitor, &ep_monitor)
...
for (;;) {
...
udev_event = udev_event_new(dev);
udev_event->fd_signal = fd_signal;
udev_event_execute_rules(udev_event, rules, &sigmask_orig);
udev_event_execute_run(udev_event, &sigmask_orig); //udev_event_execute_run()...
...
while (dev == NULL) {
struct epoll_event ev[4];
int fdcount;
fdcount = epoll_wait(fd_ep, ev, ELEMENTSOF(ev), -1);
for (i = 0; i < fdcount; i++) {
if (ev[i].data.fd == fd_monitor && ev[i].events & EPOLLIN) {
dev = udev_monitor_receive_device(worker_monitor);
break;
} else if (ev[i].data.fd == fd_signal && ev[i].events & EPOLLIN) {
...
size = read(fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
if (size != sizeof(struct signalfd_siginfo))
continue;
switch (fdsi.ssi_signo) {
case SIGTERM:
goto out;
}
}
}
}
}
default: //父进程(记录新 worker 的 pid 和状态)
/* close monitor, but keep address around */
udev_monitor_disconnect(worker_monitor);
worker->monitor = worker_monitor;
worker->pid = pid;
worker->state = WORKER_RUNNING;
worker->event_start_usec = now_usec();
worker->event = event;
event->state = EVENT_RUNNING;
udev_list_node_append(&worker->node, &worker_list);
children++;
log_debug("seq %llu forked new worker [%u]\n", udev_device_get_seqnum(event->dev), pid);
break;
}
udev&mdev 之一 -- udev@systemd
最新推荐文章于 2024-08-14 11:27:23 发布