lib/kobject_uevent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * kernel userspace event delivery
 *
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2004 Novell, Inc. All rights reserved.
 * Copyright (C) 2004 IBM, Inc. All rights reserved.
 *
 * Authors:
 *	Robert Love		<rml@novell.com>
 *	Kay Sievers		<kay.sievers@vrfy.org>
 *	Arjan van de Ven	<arjanv@redhat.com>
 *	Greg Kroah-Hartman	<greg@kroah.com>
 */

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/uidgid.h>
#include <linux/uuid.h>
#include <linux/ctype.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>


atomic64_t uevent_seqnum;
#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#endif

struct uevent_sock {
	struct list_head list;
	struct sock *sk;
};

#ifdef CONFIG_NET
static LIST_HEAD(uevent_sock_list);
/* This lock protects uevent_sock_list */
static DEFINE_MUTEX(uevent_sock_mutex);
#endif

/* the strings here must match the enum in include/linux/kobject.h */
static const char *kobject_actions[] = {
	[KOBJ_ADD] = "add",
	[KOBJ_REMOVE] = "remove",
	[KOBJ_CHANGE] = "change",
	[KOBJ_MOVE] = "move",
	[KOBJ_ONLINE] = "online",
	[KOBJ_OFFLINE] = "offline",
	[KOBJ_BIND] = "bind",
	[KOBJ_UNBIND] = "unbind",
};

static int kobject_action_type(const char *buf, size_t count,
			       enum kobject_action *type,
			       const char **args)
{
	enum kobject_action action;
	size_t count_first;
	const char *args_start;
	int ret = -EINVAL;

	if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
		count--;

	if (!count)
		goto out;

	args_start = strnchr(buf, count, ' ');
	if (args_start) {
		count_first = args_start - buf;
		args_start = args_start + 1;
	} else
		count_first = count;

	for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) {
		if (strncmp(kobject_actions[action], buf, count_first) != 0)
			continue;
		if (kobject_actions[action][count_first] != '\0')
			continue;
		if (args)
			*args = args_start;
		*type = action;
		ret = 0;
		break;
	}
out:
	return ret;
}

static const char *action_arg_word_end(const char *buf, const char *buf_end,
				       char delim)
{
	const char *next = buf;

	while (next <= buf_end && *next != delim)
		if (!isalnum(*next++))
			return NULL;

	if (next == buf)
		return NULL;

	return next;
}

static int kobject_action_args(const char *buf, size_t count,
			       struct kobj_uevent_env **ret_env)
{
	struct kobj_uevent_env *env = NULL;
	const char *next, *buf_end, *key;
	int key_len;
	int r = -EINVAL;

	if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0'))
		count--;

	if (!count)
		return -EINVAL;

	env = kzalloc(sizeof(*env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* first arg is UUID */
	if (count < UUID_STRING_LEN || !uuid_is_valid(buf) ||
	    add_uevent_var(env, "SYNTH_UUID=%.*s", UUID_STRING_LEN, buf))
		goto out;

	/*
	 * the rest are custom environment variables in KEY=VALUE
	 * format with ' ' delimiter between each KEY=VALUE pair
	 */
	next = buf + UUID_STRING_LEN;
	buf_end = buf + count - 1;

	while (next <= buf_end) {
		if (*next != ' ')
			goto out;

		/* skip the ' ', key must follow */
		key = ++next;
		if (key > buf_end)
			goto out;

		buf = next;
		next = action_arg_word_end(buf, buf_end, '=');
		if (!next || next > buf_end || *next != '=')
			goto out;
		key_len = next - buf;

		/* skip the '=', value must follow */
		if (++next > buf_end)
			goto out;

		buf = next;
		next = action_arg_word_end(buf, buf_end, ' ');
		if (!next)
			goto out;

		if (add_uevent_var(env, "SYNTH_ARG_%.*s=%.*s",
				   key_len, key, (int) (next - buf), buf))
			goto out;
	}

	r = 0;
out:
	if (r)
		kfree(env);
	else
		*ret_env = env;
	return r;
}
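
/*
 * For illustration, kobject_action_type() and kobject_action_args() together
 * parse a string such as
 *
 *	"change 12345678-1234-1234-1234-123456789abc KEY1=foo KEY2=bar"
 *
 * The leading action word maps to KOBJ_CHANGE, the 36-character UUID becomes
 * SYNTH_UUID=12345678-1234-1234-1234-123456789abc, and each KEY=VALUE pair
 * becomes SYNTH_ARG_KEY1=foo and SYNTH_ARG_KEY2=bar in the resulting uevent
 * environment. Keys and values must be alphanumeric; the UUID and key names
 * shown here are arbitrary placeholders.
 */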

/**
 * kobject_synth_uevent - send synthetic uevent with arguments
 *
 * @kobj: struct kobject for which synthetic uevent is to be generated
 * @buf: buffer containing action type and action args, newline is ignored
 * @count: length of buffer
 *
 * Returns 0 if kobject_synth_uevent() completed successfully, or the
 * corresponding error when it fails.
 */
int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
{
	char *no_uuid_envp[] = { "SYNTH_UUID=0", NULL };
	enum kobject_action action;
	const char *action_args;
	struct kobj_uevent_env *env;
	const char *msg = NULL, *devpath;
	int r;

	r = kobject_action_type(buf, count, &action, &action_args);
	if (r) {
		msg = "unknown uevent action string";
		goto out;
	}

	if (!action_args) {
		r = kobject_uevent_env(kobj, action, no_uuid_envp);
		goto out;
	}

	r = kobject_action_args(action_args,
				count - (action_args - buf), &env);
	if (r == -EINVAL) {
		msg = "incorrect uevent action arguments";
		goto out;
	}

	if (r)
		goto out;

	r = kobject_uevent_env(kobj, action, env->envp);
	kfree(env);
out:
	if (r) {
		devpath = kobject_get_path(kobj, GFP_KERNEL);
		pr_warn("synth uevent: %s: %s\n",
			devpath ?: "unknown device",
			msg ?: "failed to send uevent");
		kfree(devpath);
	}
	return r;
}
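
/*
 * For illustration: the driver core wires a device's sysfs "uevent" attribute
 * store callback to this function, so userspace can typically request a
 * synthetic event with, for example:
 *
 *	echo "add" > /sys/class/net/lo/uevent
 *	echo "change 12345678-1234-1234-1234-123456789abc ERROR=1" \
 *		> /sys/block/sda/uevent
 *
 * The first form sends a plain "add" uevent carrying SYNTH_UUID=0; the second
 * sends a "change" uevent tagged with the given (placeholder) UUID plus
 * SYNTH_ARG_ERROR=1. The device paths above are only examples.
 */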

#ifdef CONFIG_UEVENT_HELPER
static int kobj_usermode_filter(struct kobject *kobj)
{
	const struct kobj_ns_type_operations *ops;

	ops = kobj_ns_ops(kobj);
	if (ops) {
		const void *init_ns, *ns;

		ns = kobj->ktype->namespace(kobj);
		init_ns = ops->initial_ns();
		return ns != init_ns;
	}

	return 0;
}

static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
{
	int buffer_size = sizeof(env->buf) - env->buflen;
	int len;

	len = strscpy(&env->buf[env->buflen], subsystem, buffer_size);
	if (len < 0) {
		pr_warn("%s: insufficient buffer space (%u left) for %s\n",
			__func__, buffer_size, subsystem);
		return -ENOMEM;
	}

	env->argv[0] = uevent_helper;
	env->argv[1] = &env->buf[env->buflen];
	env->argv[2] = NULL;

	env->buflen += len + 1;
	return 0;
}

static void cleanup_uevent_env(struct subprocess_info *info)
{
	kfree(info->data);
}
#endif

#ifdef CONFIG_NET
static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env,
					const char *action_string,
					const char *devpath)
{
	struct netlink_skb_parms *parms;
	struct sk_buff *skb = NULL;
	char *scratch;
	size_t len;

	/* allocate message with maximum possible size */
	len = strlen(action_string) + strlen(devpath) + 2;
	skb = alloc_skb(len + env->buflen, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* add header */
	scratch = skb_put(skb, len);
	sprintf(scratch, "%s@%s", action_string, devpath);

	skb_put_data(skb, env->buf, env->buflen);

	parms = &NETLINK_CB(skb);
	parms->creds.uid = GLOBAL_ROOT_UID;
	parms->creds.gid = GLOBAL_ROOT_GID;
	parms->dst_group = 1;
	parms->portid = 0;

	return skb;
}
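
/*
 * For illustration, the resulting skb payload is a flat, NUL-separated string
 * block: an "ACTION@DEVPATH" header followed by the environment buffer that
 * add_uevent_var() built up, e.g. (values are examples only):
 *
 *	"add@/devices/virtual/net/lo\0"
 *	"ACTION=add\0"
 *	"DEVPATH=/devices/virtual/net/lo\0"
 *	"SUBSYSTEM=net\0"
 *	...
 *	"SEQNUM=4711\0"
 *
 * This is the format that udev and other NETLINK_KOBJECT_UEVENT listeners
 * parse.
 */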

static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
					 const char *action_string,
					 const char *devpath)
{
	struct sk_buff *skb = NULL;
	struct uevent_sock *ue_sk;
	int retval = 0;

	/* send netlink message */
	mutex_lock(&uevent_sock_mutex);
	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
		struct sock *uevent_sock = ue_sk->sk;

		if (!netlink_has_listeners(uevent_sock, 1))
			continue;

		if (!skb) {
			retval = -ENOMEM;
			skb = alloc_uevent_skb(env, action_string, devpath);
			if (!skb)
				continue;
		}

		retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1,
					   GFP_KERNEL);
		/* ENOBUFS should be handled in userspace */
		if (retval == -ENOBUFS || retval == -ESRCH)
			retval = 0;
	}
	mutex_unlock(&uevent_sock_mutex);
	consume_skb(skb);

	return retval;
}

static int uevent_net_broadcast_tagged(struct sock *usk,
				       struct kobj_uevent_env *env,
				       const char *action_string,
				       const char *devpath)
{
	struct user_namespace *owning_user_ns = sock_net(usk)->user_ns;
	struct sk_buff *skb = NULL;
	int ret = 0;

	skb = alloc_uevent_skb(env, action_string, devpath);
	if (!skb)
		return -ENOMEM;

	/* fix credentials */
	if (owning_user_ns != &init_user_ns) {
		struct netlink_skb_parms *parms = &NETLINK_CB(skb);
		kuid_t root_uid;
		kgid_t root_gid;

		/* fix uid */
		root_uid = make_kuid(owning_user_ns, 0);
		if (uid_valid(root_uid))
			parms->creds.uid = root_uid;

		/* fix gid */
		root_gid = make_kgid(owning_user_ns, 0);
		if (gid_valid(root_gid))
			parms->creds.gid = root_gid;
	}

	ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL);
	/* ENOBUFS should be handled in userspace */
	if (ret == -ENOBUFS || ret == -ESRCH)
		ret = 0;

	return ret;
}
#endif

static int kobject_uevent_net_broadcast(struct kobject *kobj,
					struct kobj_uevent_env *env,
					const char *action_string,
					const char *devpath)
{
	int ret = 0;

#ifdef CONFIG_NET
	const struct kobj_ns_type_operations *ops;
	const struct net *net = NULL;

	ops = kobj_ns_ops(kobj);
	if (!ops && kobj->kset) {
		struct kobject *ksobj = &kobj->kset->kobj;

		if (ksobj->parent != NULL)
			ops = kobj_ns_ops(ksobj->parent);
	}

	/* kobjects currently only carry network namespace tags and they
	 * are the only tag relevant here since we want to decide which
	 * network namespaces to broadcast the uevent into.
	 */
	if (ops && ops->netlink_ns && kobj->ktype->namespace)
		if (ops->type == KOBJ_NS_TYPE_NET)
			net = kobj->ktype->namespace(kobj);

	if (!net)
		ret = uevent_net_broadcast_untagged(env, action_string,
						    devpath);
	else
		ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env,
						  action_string, devpath);
#endif

	return ret;
}

static void zap_modalias_env(struct kobj_uevent_env *env)
{
	static const char modalias_prefix[] = "MODALIAS=";
	size_t len;
	int i, j;

	for (i = 0; i < env->envp_idx;) {
		if (strncmp(env->envp[i], modalias_prefix,
			    sizeof(modalias_prefix) - 1)) {
			i++;
			continue;
		}

		len = strlen(env->envp[i]) + 1;

		if (i != env->envp_idx - 1) {
			memmove(env->envp[i], env->envp[i + 1],
				env->buflen - len);

			for (j = i; j < env->envp_idx - 1; j++)
				env->envp[j] = env->envp[j + 1] - len;
		}

		env->envp_idx--;
		env->buflen -= len;
	}
}

/**
 * kobject_uevent_env - send an uevent with environmental data
 *
 * @kobj: struct kobject that the action is happening to
 * @action: action that is happening
 * @envp_ext: pointer to environmental data
 *
 * Returns 0 if kobject_uevent_env() completed successfully, or the
 * corresponding error when it fails.
 */
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
		       char *envp_ext[])
{
	struct kobj_uevent_env *env;
	const char *action_string = kobject_actions[action];
	const char *devpath = NULL;
	const char *subsystem;
	struct kobject *top_kobj;
	struct kset *kset;
	const struct kset_uevent_ops *uevent_ops;
	int i = 0;
	int retval = 0;

	/*
	 * Mark the "remove" event done regardless of the result, because some
	 * subsystems do not want to re-trigger a "remove" event via automatic
	 * cleanup.
	 */
	if (action == KOBJ_REMOVE)
		kobj->state_remove_uevent_sent = 1;

	pr_debug("kobject: '%s' (%p): %s\n",
		 kobject_name(kobj), kobj, __func__);

	/* search the kset we belong to */
	top_kobj = kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;

	if (!top_kobj->kset) {
		pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
			 "without kset!\n", kobject_name(kobj), kobj,
			 __func__);
		return -EINVAL;
	}

	kset = top_kobj->kset;
	uevent_ops = kset->uevent_ops;

	/* skip the event, if uevent_suppress is set */
	if (kobj->uevent_suppress) {
		pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
			 "caused the event to drop!\n",
			 kobject_name(kobj), kobj, __func__);
		return 0;
	}
	/* skip the event, if the filter returns zero. */
	if (uevent_ops && uevent_ops->filter)
		if (!uevent_ops->filter(kobj)) {
			pr_debug("kobject: '%s' (%p): %s: filter function "
				 "caused the event to drop!\n",
				 kobject_name(kobj), kobj, __func__);
			return 0;
		}

	/* originating subsystem */
	if (uevent_ops && uevent_ops->name)
		subsystem = uevent_ops->name(kobj);
	else
		subsystem = kobject_name(&kset->kobj);
	if (!subsystem) {
		pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
			 "event to drop!\n", kobject_name(kobj), kobj,
			 __func__);
		return 0;
	}

	/* environment buffer */
	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* complete object path */
	devpath = kobject_get_path(kobj, GFP_KERNEL);
	if (!devpath) {
		retval = -ENOENT;
		goto exit;
	}

	/* default keys */
	retval = add_uevent_var(env, "ACTION=%s", action_string);
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "DEVPATH=%s", devpath);
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
	if (retval)
		goto exit;

	/* keys passed in from the caller */
	if (envp_ext) {
		for (i = 0; envp_ext[i]; i++) {
			retval = add_uevent_var(env, "%s", envp_ext[i]);
			if (retval)
				goto exit;
		}
	}

	/* let the kset specific function add its stuff */
	if (uevent_ops && uevent_ops->uevent) {
		retval = uevent_ops->uevent(kobj, env);
		if (retval) {
			pr_debug("kobject: '%s' (%p): %s: uevent() returned "
				 "%d\n", kobject_name(kobj), kobj,
				 __func__, retval);
			goto exit;
		}
	}

	switch (action) {
	case KOBJ_ADD:
		/*
		 * Mark the "add" event so we can make sure a "remove" event is
		 * delivered to userspace during automatic cleanup. If the
		 * object sent an "add" event, a "remove" event will be
		 * generated automatically by the core, if not already done by
		 * the caller.
		 */
		kobj->state_add_uevent_sent = 1;
		break;

	case KOBJ_UNBIND:
		zap_modalias_env(env);
		break;

	default:
		break;
	}

	/* we will send an event, so request a new sequence number */
	retval = add_uevent_var(env, "SEQNUM=%llu",
				atomic64_inc_return(&uevent_seqnum));
	if (retval)
		goto exit;

	retval = kobject_uevent_net_broadcast(kobj, env, action_string,
					      devpath);

#ifdef CONFIG_UEVENT_HELPER
	/* call uevent_helper, usually only enabled during early boot */
	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
		struct subprocess_info *info;

		retval = add_uevent_var(env, "HOME=/");
		if (retval)
			goto exit;
		retval = add_uevent_var(env,
					"PATH=/sbin:/bin:/usr/sbin:/usr/bin");
		if (retval)
			goto exit;
		retval = init_uevent_argv(env, subsystem);
		if (retval)
			goto exit;

		retval = -ENOMEM;
		info = call_usermodehelper_setup(env->argv[0], env->argv,
						 env->envp, GFP_KERNEL,
						 NULL, cleanup_uevent_env, env);
		if (info) {
			retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
			env = NULL;	/* freed by cleanup_uevent_env */
		}
	}
#endif

exit:
	kfree(devpath);
	kfree(env);
	return retval;
}
EXPORT_SYMBOL_GPL(kobject_uevent_env);
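
/*
 * Illustrative sketch of how a driver typically uses kobject_uevent_env() to
 * attach extra environment data to a "change" event (the helper name, device
 * pointer and key names below are hypothetical):
 *
 *	static void my_report_error(struct device *dev, int err)
 *	{
 *		char code[32];
 *		char *envp[] = { "EVENT=error", code, NULL };
 *
 *		snprintf(code, sizeof(code), "ERROR_CODE=%d", err);
 *		kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
 *	}
 *
 * Userspace (e.g. a udev rule) then sees EVENT and ERROR_CODE alongside the
 * default ACTION/DEVPATH/SUBSYSTEM/SEQNUM keys.
 */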

/**
 * kobject_uevent - notify userspace by sending an uevent
 *
 * @kobj: struct kobject that the action is happening to
 * @action: action that is happening
 *
 * Returns 0 if kobject_uevent() completed successfully, or the
 * corresponding error when it fails.
 */
int kobject_uevent(struct kobject *kobj, enum kobject_action action)
{
	return kobject_uevent_env(kobj, action, NULL);
}
EXPORT_SYMBOL_GPL(kobject_uevent);
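
/*
 * Illustrative sketch: the common pattern for a kobject that belongs to a
 * kset is to announce it right after registration (error handling omitted;
 * "my_kobj", "my_kset" and "my_ktype" are hypothetical):
 *
 *	my_kobj->kset = my_kset;
 *	kobject_init_and_add(my_kobj, &my_ktype, NULL, "my_object");
 *	kobject_uevent(my_kobj, KOBJ_ADD);
 *
 * The matching KOBJ_REMOVE event is then generated by the core during
 * cleanup if the caller does not send one itself.
 */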

/**
 * add_uevent_var - add key value string to the environment buffer
 * @env: environment buffer structure
 * @format: printf format for the key=value pair
 *
 * Returns 0 if environment variable was added successfully or -ENOMEM
 * if no space was available.
 */
int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
{
	va_list args;
	int len;

	if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
		WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
		return -ENOMEM;
	}

	va_start(args, format);
	len = vsnprintf(&env->buf[env->buflen],
			sizeof(env->buf) - env->buflen,
			format, args);
	va_end(args);

	if (len >= (sizeof(env->buf) - env->buflen)) {
		WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
		return -ENOMEM;
	}

	env->envp[env->envp_idx++] = &env->buf[env->buflen];
	env->buflen += len + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(add_uevent_var);
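
/*
 * Illustrative sketch: add_uevent_var() is normally called from a subsystem's
 * uevent callback to append extra KEY=VALUE pairs before the event is sent.
 * Assuming the kset_uevent_ops::uevent prototype of this kernel, such a
 * callback could look like (names are hypothetical):
 *
 *	static int my_uevent(const struct kobject *kobj,
 *			     struct kobj_uevent_env *env)
 *	{
 *		int ret;
 *
 *		ret = add_uevent_var(env, "MY_NAME=%s", kobject_name(kobj));
 *		if (ret)
 *			return ret;	// -ENOMEM: key table or buffer full
 *		return add_uevent_var(env, "MY_FLAVOR=%s", "vanilla");
 *	}
 */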

#if defined(CONFIG_NET)
static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
				struct netlink_ext_ack *extack)
{
	/* largest u64 (2^64 - 1) has 20 decimal digits; 21 leaves room to spare */
	char buf[sizeof("SEQNUM=") + 21];
	struct sk_buff *skbc;
	int ret;

	/* bump and prepare sequence number */
	ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu",
		       atomic64_inc_return(&uevent_seqnum));
	if (ret < 0 || (size_t)ret >= sizeof(buf))
		return -ENOMEM;
	ret++;

	/* verify message does not overflow */
	if ((skb->len + ret) > UEVENT_BUFFER_SIZE) {
		NL_SET_ERR_MSG(extack, "uevent message too big");
		return -EINVAL;
	}

	/* copy skb and extend to accommodate sequence number */
	skbc = skb_copy_expand(skb, 0, ret, GFP_KERNEL);
	if (!skbc)
		return -ENOMEM;

	/* append sequence number */
	skb_put_data(skbc, buf, ret);

	/* remove msg header */
	skb_pull(skbc, NLMSG_HDRLEN);

	/* set portid 0 to inform userspace that the message comes from the kernel */
	NETLINK_CB(skbc).portid = 0;
	NETLINK_CB(skbc).dst_group = 1;

	ret = netlink_broadcast(usk, skbc, 0, 1, GFP_KERNEL);
	/* ENOBUFS should be handled in userspace */
	if (ret == -ENOBUFS || ret == -ESRCH)
		ret = 0;

	return ret;
}
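
/*
 * For illustration, a minimal userspace listener for these broadcasts binds
 * an AF_NETLINK socket of protocol NETLINK_KOBJECT_UEVENT to multicast
 * group 1 (the only group used above) and reads the NUL-separated
 * "ACTION@DEVPATH" + KEY=VALUE payload directly; this is a sketch, not a
 * replacement for libudev:
 *
 *	#include <linux/netlink.h>
 *	#include <sys/socket.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_nl addr = {
 *			.nl_family = AF_NETLINK,
 *			.nl_groups = 1,		// kernel uevent multicast group
 *		};
 *		char buf[8192];
 *		int fd;
 *
 *		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);
 *		if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
 *			return 1;
 *
 *		for (;;) {
 *			ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
 *
 *			if (len <= 0)
 *				break;
 *			buf[len] = '\0';
 *			printf("%s\n", buf);	// first string: "ACTION@DEVPATH"
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */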

static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net;
	int ret;

	if (!nlmsg_data(nlh))
		return -EINVAL;

	/*
	 * Verify that we are allowed to send messages to the target
	 * network namespace. The caller must have CAP_SYS_ADMIN in the
	 * owning user namespace of the target network namespace.
	 */
	net = sock_net(NETLINK_CB(skb).sk);
	if (!netlink_ns_capable(skb, net->user_ns, CAP_SYS_ADMIN)) {
		NL_SET_ERR_MSG(extack, "missing CAP_SYS_ADMIN capability");
		return -EPERM;
	}

	ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);

	return ret;
}

static void uevent_net_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &uevent_net_rcv_skb);
}

static int uevent_net_init(struct net *net)
{
	struct uevent_sock *ue_sk;
	struct netlink_kernel_cfg cfg = {
		.groups = 1,
		.input = uevent_net_rcv,
		.flags = NL_CFG_F_NONROOT_RECV
	};

	ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
	if (!ue_sk)
		return -ENOMEM;

	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
	if (!ue_sk->sk) {
		pr_err("kobject_uevent: unable to create netlink socket!\n");
		kfree(ue_sk);
		return -ENODEV;
	}

	net->uevent_sock = ue_sk;

	/* Restrict uevents to initial user namespace. */
	if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
		mutex_lock(&uevent_sock_mutex);
		list_add_tail(&ue_sk->list, &uevent_sock_list);
		mutex_unlock(&uevent_sock_mutex);
	}

	return 0;
}

static void uevent_net_exit(struct net *net)
{
	struct uevent_sock *ue_sk = net->uevent_sock;

	if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
		mutex_lock(&uevent_sock_mutex);
		list_del(&ue_sk->list);
		mutex_unlock(&uevent_sock_mutex);
	}

	netlink_kernel_release(ue_sk->sk);
	kfree(ue_sk);
}

static struct pernet_operations uevent_net_ops = {
	.init = uevent_net_init,
	.exit = uevent_net_exit,
};

static int __init kobject_uevent_init(void)
{
	return register_pernet_subsys(&uevent_net_ops);
}

postcore_initcall(kobject_uevent_init);
#endif