/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of 1 million possible PIDs are
 * already allocated, is the scanning of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 */

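/*
 * A back-of-the-envelope check of the worst case quoted above (a sketch,
 * assuming 4 KiB pages): each bitmap page covers PAGE_SIZE * 8 == 32768
 * PIDs, so a 1-million-PID space needs
 *
 *	1048576 / 32768 = 32
 *
 * bitmap pages -- hence "32 list entries", each scanned for a zero bit
 * at a cost of at most PAGE_SIZE bytes.
 */
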
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

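/*
 * Worked example of the arithmetic above (a sketch, assuming 4 KiB
 * pages, i.e. BITS_PER_PAGE == 32768): for the third bitmap page
 * (map == &pid_ns->pidmap[2]) and off == 100, mk_pid() returns
 * 2 * 32768 + 100 == 65636.
 */
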
#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
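/*
 * Rough sizing (a sketch, assuming 4 KiB pages and a 64-bit build,
 * where PID_MAX_LIMIT is 4 * 1024 * 1024): the 4-million-PID ceiling
 * corresponds to 4194304 / 32768 == 128 bitmap pages, i.e. at most
 * 512 KiB of bitmaps -- and only if pid_max is actually raised that
 * far. With the default pid_max of 32768 a single page suffices.
 */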
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

int is_global_init(struct task_struct *tsk)
{
	return tsk == init_pid_ns.child_reaper;
}

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
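/*
 * The interleaving behind that deadlock, spelled out (a sketch; the
 * CPU numbering is arbitrary):
 *
 *	CPU 0					CPU 1
 *	-----					-----
 *	write_lock_irq(&tasklist_lock);		spin_lock(&pidmap_lock);
 *	detach_pid() -> free_pid()		--- interrupt ---
 *	  spins on pidmap_lock			read_lock(&tasklist_lock);
 *						  spins on tasklist_lock
 *
 * CPU 1 can never release pidmap_lock because its interrupt handler
 * spins on tasklist_lock, which CPU 0 holds for writing; disabling
 * interrupts while pidmap_lock is held breaks the cycle.
 */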
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

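/*
 * A note on the max_scan arithmetic above (a sketch, assuming 4 KiB
 * pages and the default pid_max of 32768, so the whole PID space fits
 * in one bitmap page): (32768 + 32767)/32768 == 1. When the scan starts
 * at a page-aligned offset the first pass already covers whole pages
 * and "- !offset" drops the extra iteration; otherwise the partially
 * scanned starting page is revisited once after wrapping around to
 * offset RESERVED_PIDS.
 */
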
static int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

fastcall void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		if (ns != &init_pid_ns)
			put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

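/*
 * Typical refcount pairing (an illustrative sketch, not a call site in
 * this file): a caller that stashes a struct pid beyond the current
 * RCU section takes its own reference and drops it via put_pid():
 *
 *	struct pid *pid = get_pid(task_pid(current));
 *	...
 *	put_pid(pid);
 */
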
static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;
		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (ns != &init_pid_ns)
		get_pid_ns(ns);

	pid->level = ns->level;
	pid->nr = pid->numbers[0].nr;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;
out_free:
	for (i++; i <= ns->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

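/*
 * Layout sketch for the multi-level case: a task forked in a level-1
 * namespace gets one struct upid per level, e.g.
 *
 *	pid->level      == 1
 *	pid->numbers[0] == { .nr = 4711, .ns = &init_pid_ns }	(global)
 *	pid->numbers[1] == { .nr = 2,    .ns = child_ns }	(virtual)
 *
 * The concrete numbers and the child_ns name are made up for
 * illustration; only numbers[0] is visible system-wide.
 */
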
struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
int fastcall attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}

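/*
 * Illustrative call sites (sketched from the fork path of this era,
 * so treat the exact forms as an assumption): copy_process() attaches
 * the new task under the tasklist_lock write lock, e.g.
 *
 *	attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
 *	attach_pid(p, PIDTYPE_SID, task_session(current));
 *	attach_pid(p, PIDTYPE_PID, pid);
 */
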
void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
	old->pids[type].pid = NULL;
}

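/*
 * Usage note (stated from memory, so treat as illustrative rather than
 * authoritative): the motivating caller is de_thread() in fs/exec.c,
 * where an exec'ing non-leader thread takes over the group leader's
 * pid links in place, without an attach/detach round trip.
 */
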
struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
		struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), type);
}

EXPORT_SYMBOL(find_task_by_pid_type_ns);

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();

	return pid;
}

struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;

	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();

	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}

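/*
 * Semantics by example (reusing the level-1 pid sketched above): asking
 * from init_pid_ns yields the global nr, asking from the pid's own
 * namespace yields the virtual nr, and asking from a deeper or
 * unrelated namespace yields 0, because either the level check or the
 * upid->ns comparison fails.
 */
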
/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			/* FIXME add numerical ids here */
			sizeof(struct pid), 0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}

struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
	BUG_ON(!old_ns);
	get_pid_ns(old_ns);
	return old_ns;
}

void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	kfree(ns);
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte
 * or more.
 */
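/*
 * Worked example of the scaling below (a sketch): with 128 MiB of
 * kernel pages, megabytes == 128, fls(128 * 4) == fls(512) == 10, so
 * pidhash_shift == 10 and the table has 1024 slots; at 1 GiB,
 * fls(4096) == 13 is clamped to 12, giving the 4096-slot maximum, and
 * the max(4, ...) floor guarantees the 16-slot minimum.
 */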
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = create_pid_cachep(1);
	if (init_pid_ns.pid_cachep == NULL)
		panic("Can't create pid_1 cachep\n");
}