/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.root_user	= &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
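/*
 * Worked example (illustrative; assumes UIDHASH_BITS = 7, so UIDHASH_SZ
 * = 128 and UIDHASH_MASK = 127): uid 1000 hashes to
 * ((1000 >> 7) + 1000) & 127 = (7 + 1000) & 127 = 111.
 * Folding the high bits back into the low bits separates uids that are
 * equal modulo the table size: uids 0, 128 and 256 land in buckets
 * 0, 1 and 2 rather than all colliding in bucket 0.
 */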
static struct kmem_cache *uid_cachep;
/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
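/*
 * Sketch of the caller-side idiom that follows from the above (this is
 * what free_uid() below does, shown here to tie it to the comment):
 *
 *	local_irq_save(flags);
 *	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
 *		free_user(up, flags);	-- drops the lock, restores IRQs
 *	else
 *		local_irq_restore(flags);
 *
 * i.e. plain IRQ-disabling locking is used throughout rather than
 * spin_lock_bh()/local_bh_enable(), for exactly the reason above.
 */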
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}
	return NULL;
}
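/*
 * Note (added for clarity): uid_hash_find() bumps ->__count while
 * uidhash_lock is held, and the final decrement in free_uid() uses
 * atomic_dec_and_lock() on the same lock, so a user_struct returned
 * from this lookup cannot be freed out from under the caller.
 */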
#ifdef CONFIG_FAIR_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;
	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_FAIR_USER_SCHED */
#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}
/* uid directory attributes */
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);
	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%lu", &rt_runtime);
	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
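/*
 * Usage sketch (illustrative, not from this file): once the per-uid
 * directory exists, the two attributes above are plain sysfs files,
 * e.g. for a hypothetical uid 1000:
 *
 *	$ cat /sys/kernel/uids/1000/cpu_share
 *	$ echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * On success the store callbacks return 'size' so the whole write is
 * consumed in one call; on failure they return the scheduler's error.
 */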
/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
	&cpu_share_attr.attr,
	&cpu_rt_runtime_attr.attr,
	NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};
/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}
/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}
/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}
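/*
 * Note (added for clarity): teardown is deferred to a workqueue because
 * free_uid() may run with IRQs disabled, while kobject_uevent() and
 * kobject_del() in remove_user_sysfs_dir() may sleep and need process
 * context. The atomic_inc() temporarily revives the count so the work
 * item holds the last reference until remove_user_sysfs_dir() drops it
 * through the same atomic_dec_and_lock() protocol.
 */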
#else	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
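/*
 * Example caller pattern (illustrative, assuming some uid_t 'uid' of
 * interest):
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		...inspect u's counters...
 *		free_uid(u);	-- drop the reference find_user() took
 *	}
 */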
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
		new->mq_bytes = 0;
#endif
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0)
			goto out_free_user;
		if (sched_create_user(new) < 0)
			goto out_put_keys;
		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();
	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_put_keys:
	key_put(new->uid_keyring);
	key_put(new->session_keyring);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit?  We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it.  -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}
#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_struct-s will
	 * be still alive, but not in hashes. subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
#endif
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);