/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R-style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists: objects smaller than
 * 256 bytes, objects smaller than 1024 bytes, and all larger objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist are only taken from pages residing on the requested node,
 * in order to prevent random node placement.
 */
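
/*
 * Illustration (editor's sketch, not part of the allocator): for small
 * requests the returned pointer hides a size header just below it.
 * Assuming a 4-byte minimum alignment, kmalloc(100) behaves roughly as:
 *
 *     m = slob_alloc(100 + 4, gfp, 4, node);    header + payload
 *     *m = 100;                                 record the requested size
 *     return (void *)m + 4;                     caller sees only the payload
 *
 * and kfree(p) walks back: m = p - 4; slob_free(m, *m + 4).  Requests of
 * about PAGE_SIZE or more skip this path and go to alloc_pages() directly.
 */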

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which holds the size of the block if
 * positive, or the negated offset of the next free block if negative
 * (both measured in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the negated offset of the
 * next block. Those with larger size contain their size in the first
 * SLOB_UNIT of memory, and the offset of the next free block in the
 * second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
        slobidx_t units;
};
typedef struct slob_block slob_t;
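
/*
 * Worked example (editor's note): with 2-byte units, a free block of
 * 3 units whose next free block lies 40 units from the page base is
 * encoded as s[0].units = 3, s[1].units = 40.  A 1-unit free block with
 * the same successor has no room for a second field, so it stores the
 * negated offset instead: s[0].units = -40.
 */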

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
        union {
                struct {
                        unsigned long flags;    /* mandatory */
                        atomic_t _count;        /* mandatory */
                        slobidx_t units;        /* free units left in page */
                        unsigned long pad[2];
                        slob_t *free;           /* first free slob_t in page */
                        struct list_head list;  /* linked list of free pages */
                };
                struct page page;
        };
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
        reset_page_mapcount(&sp->page);
        sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
        return test_bit(PG_active, &sp->flags);
}

static inline void set_slob_page(struct slob_page *sp)
{
        __set_bit(PG_active, &sp->flags);
}

static inline void clear_slob_page(struct slob_page *sp)
{
        __clear_bit(PG_active, &sp->flags);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
        return test_bit(PG_private, &sp->flags);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
        list_add(&sp->list, list);
        __set_bit(PG_private, &sp->flags);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
        list_del(&sp->list);
        __clear_bit(PG_private, &sp->flags);
}
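
/*
 * Editor's note on the flag overloading above: PG_active marks a page
 * as belonging to SLOB at all (tested by slob_page()), while PG_private
 * marks a SLOB page that still has free units and therefore sits on one
 * of the three free lists (tested by slob_page_free()).  Pages backing
 * the slob heap are not on the LRU and carry no buffer-head private
 * data, so both bits are available to repurpose here.
 */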

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
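
/*
 * Example (editor's note): on a 4K-page configuration slobidx_t is s16,
 * so SLOB_UNIT is sizeof(slob_t) == 2 bytes and SLOB_UNITS() rounds a
 * byte count up to 2-byte units: SLOB_UNITS(1) == 1,
 * SLOB_UNITS(100) == 50, SLOB_UNITS(101) == 51.
 */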

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
        struct rcu_head head;
        int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t offset = next - base;

        if (size > 1) {
                s[0].units = size;
                s[1].units = offset;
        } else
                s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
        if (s->units > 0)
                return s->units;
        return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t next;

        if (s[0].units < 0)
                next = -s[0].units;
        else
                next = s[1].units;
        return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
        return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
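
/*
 * Round-trip example (editor's note): after set_slob(s, 3, base + 40),
 * slob_units(s) returns 3 and slob_next(s) returns base + 40; after
 * set_slob(s, 1, base + 40), s[0].units holds -40, slob_units(s)
 * returns 1 and slob_next(s) again recovers base + 40.  slob_last(s) is
 * true when the decoded next pointer lands on a page boundary (offset 0
 * or one full page), which is how the end of the per-page list is
 * marked.
 */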

static void *slob_new_page(gfp_t gfp, int order, int node)
{
        void *page;

#ifdef CONFIG_NUMA
        if (node != -1)
                page = alloc_pages_node(node, gfp, order);
        else
#endif
                page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        return page_address(page);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
        slob_t *prev, *cur, *aligned = NULL;
        int delta = 0, units = SLOB_UNITS(size);

        for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
                slobidx_t avail = slob_units(cur);

                if (align) {
                        aligned = (slob_t *)ALIGN((unsigned long)cur, align);
                        delta = aligned - cur;
                }
                if (avail >= units + delta) { /* room enough? */
                        slob_t *next;

                        if (delta) { /* need to fragment head to align? */
                                next = slob_next(cur);
                                set_slob(aligned, avail - delta, next);
                                set_slob(cur, delta, aligned);
                                prev = cur;
                                cur = aligned;
                                avail = slob_units(cur);
                        }

                        next = slob_next(cur);
                        if (avail == units) { /* exact fit? unlink. */
                                if (prev)
                                        set_slob(prev, slob_units(prev), next);
                                else
                                        sp->free = next;
                        } else { /* fragment */
                                if (prev)
                                        set_slob(prev, slob_units(prev), cur + units);
                                else
                                        sp->free = cur + units;
                                set_slob(cur + units, avail - units, next);
                        }

                        sp->units -= units;
                        if (!sp->units)
                                clear_slob_page_free(sp);
                        return cur;
                }
                if (slob_last(cur))
                        return NULL;
        }
}
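
/*
 * Alignment sketch (editor's note): when slob_page_alloc() is asked for
 * an aligned block and 'cur' is not already aligned, the head fragment
 * [cur, aligned) is turned into its own free block of 'delta' units
 * that points at 'aligned', and allocation then proceeds from 'aligned'
 * as usual, so the skipped bytes stay on the free list rather than
 * leaking.
 */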

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
        struct slob_page *sp;
        struct list_head *prev;
        struct list_head *slob_list;
        slob_t *b = NULL;
        unsigned long flags;

        if (size < SLOB_BREAK1)
                slob_list = &free_slob_small;
        else if (size < SLOB_BREAK2)
                slob_list = &free_slob_medium;
        else
                slob_list = &free_slob_large;

        spin_lock_irqsave(&slob_lock, flags);
        /* Iterate through each partially free page, try to find room */
        list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
                /*
                 * If there's a node specification, search for a partial
                 * page with a matching node id in the freelist.
                 */
                if (node != -1 && page_to_nid(&sp->page) != node)
                        continue;
#endif
                /* Enough room on this page? */
                if (sp->units < SLOB_UNITS(size))
                        continue;

                /* Attempt to alloc */
                prev = sp->list.prev;
                b = slob_page_alloc(sp, size, align);
                if (!b)
                        continue;

                /* Improve fragment distribution and reduce our average
                 * search time by starting our next search here. (see
                 * Knuth vol 1, sec 2.5, pg 449) */
                if (prev != slob_list->prev &&
                                slob_list->next != prev->next)
                        list_move_tail(slob_list, prev->next);
                break;
        }
        spin_unlock_irqrestore(&slob_lock, flags);

        /* Not enough space: must allocate a new page */
        if (!b) {
                b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
                if (!b)
                        return NULL;
                sp = (struct slob_page *)virt_to_page(b);
                set_slob_page(sp);

                spin_lock_irqsave(&slob_lock, flags);
                sp->units = SLOB_UNITS(PAGE_SIZE);
                sp->free = b;
                INIT_LIST_HEAD(&sp->list);
                set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                set_slob_page_free(sp, slob_list);
                b = slob_page_alloc(sp, size, align);
                BUG_ON(!b);
                spin_unlock_irqrestore(&slob_lock, flags);
        }
        if (unlikely((gfp & __GFP_ZERO) && b))
                memset(b, 0, size);
        return b;
}
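
/*
 * Editor's note: the list_move_tail() above rotates the list head so
 * the next scan of this size class begins at the page we just allocated
 * from (or its successor, if that page filled up), instead of always
 * restarting at the front: the next-fit policy the Knuth reference
 * describes.
 */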

/*
 * slob_free: deallocation entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
        struct slob_page *sp;
        slob_t *prev, *next, *b = (slob_t *)block;
        slobidx_t units;
        unsigned long flags;

        if (unlikely(ZERO_OR_NULL_PTR(block)))
                return;
        BUG_ON(!size);

        sp = (struct slob_page *)virt_to_page(block);
        units = SLOB_UNITS(size);

        spin_lock_irqsave(&slob_lock, flags);

        if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
                /* Go directly to page allocator. Do not pass slob allocator */
                if (slob_page_free(sp))
                        clear_slob_page_free(sp);
                clear_slob_page(sp);
                free_slob_page(sp);
                free_page((unsigned long)b);
                goto out;
        }

        if (!slob_page_free(sp)) {
                /* This slob page is about to become partially free. Easy! */
                sp->units = units;
                sp->free = b;
                set_slob(b, units,
                        (void *)((unsigned long)(b +
                                        SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
                set_slob_page_free(sp, &free_slob_small);
                goto out;
        }

        /*
         * Otherwise the page is already partially free, so find reinsertion
         * point.
         */
        sp->units += units;

        if (b < sp->free) {
                if (b + units == sp->free) {
                        units += slob_units(sp->free);
                        sp->free = slob_next(sp->free);
                }
                set_slob(b, units, sp->free);
                sp->free = b;
        } else {
                prev = sp->free;
                next = slob_next(prev);
                while (b > next) {
                        prev = next;
                        next = slob_next(prev);
                }

                if (!slob_last(prev) && b + units == next) {
                        units += slob_units(next);
                        set_slob(b, units, slob_next(next));
                } else
                        set_slob(b, units, next);

                if (prev + slob_units(prev) == b) {
                        units = slob_units(b) + slob_units(prev);
                        set_slob(prev, units, slob_next(b));
                } else
                        set_slob(prev, slob_units(prev), b);
        }
out:
        spin_unlock_irqrestore(&slob_lock, flags);
}
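
/*
 * Coalescing example (editor's note): freeing a block B whose exact
 * address neighbours A (before) and C (after) are already free first
 * merges B with C via the successor test "b + units == next", then
 * merges A with the result via the predecessor test
 * "prev + slob_units(prev) == b", leaving one contiguous free block
 * where three used to be.
 */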

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
        unsigned int *m;
        int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);

        if (size < PAGE_SIZE - align) {
                if (!size)
                        return ZERO_SIZE_PTR;

                m = slob_alloc(size + align, gfp, align, node);
                if (!m)
                        return NULL;    /* don't return a bogus offset pointer */
                *m = size;
                return (void *)m + align;
        } else {
                void *ret;

                ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
                if (ret) {
                        struct page *page;
                        page = virt_to_page(ret);
                        page->private = size;
                }
                return ret;
        }
}
EXPORT_SYMBOL(__kmalloc_node);
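
/*
 * Editor's sketch (illustrative only, hence #if 0): the two size
 * classes above take different paths, and kfree() below tells them
 * apart with the slob_page() test.
 */
#if 0
static void kmalloc_paths_demo(void)
{
        void *small = __kmalloc_node(64, GFP_KERNEL, -1);          /* slob heap: header + payload */
        void *big = __kmalloc_node(2 * PAGE_SIZE, GFP_KERNEL, -1); /* alloc_pages(): compound page */

        kfree(small);   /* size header sits just below the pointer */
        kfree(big);     /* not a slob page: dropped with put_page() */
}
#endif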

void kfree(const void *block)
{
        struct slob_page *sp;

        if (unlikely(ZERO_OR_NULL_PTR(block)))
                return;

        sp = (struct slob_page *)virt_to_page(block);
        if (slob_page(sp)) {
                int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                slob_free(m, *m + align);
        } else
                put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
        struct slob_page *sp;

        BUG_ON(!block);
        if (unlikely(block == ZERO_SIZE_PTR))
                return 0;

        sp = (struct slob_page *)virt_to_page(block);
        if (slob_page(sp))
                return ((slob_t *)block - 1)->units + SLOB_UNIT;
        else
                return sp->page.private;
}
EXPORT_SYMBOL(ksize);

struct kmem_cache {
        unsigned int size, align;
        unsigned long flags;
        const char *name;
        void (*ctor)(struct kmem_cache *, void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
        size_t align, unsigned long flags,
        void (*ctor)(struct kmem_cache *, void *))
{
        struct kmem_cache *c;

        c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);

        if (c) {
                c->name = name;
                c->size = size;
                if (flags & SLAB_DESTROY_BY_RCU) {
                        /* leave room for rcu footer at the end of object */
                        c->size += sizeof(struct slob_rcu);
                }
                c->flags = flags;
                c->ctor = ctor;
                /* ignore alignment unless it's forced */
                c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
                if (c->align < ARCH_SLAB_MINALIGN)
                        c->align = ARCH_SLAB_MINALIGN;
                if (c->align < align)
                        c->align = align;
        } else if (flags & SLAB_PANIC)
                panic("Cannot create slab cache %s\n", name);

        return c;
}
EXPORT_SYMBOL(kmem_cache_create);
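
/*
 * Typical use (editor's sketch; 'struct foo' and foo_ctor are invented
 * for illustration, hence #if 0).  Note the (cache, object) constructor
 * signature this version of the API uses, and that SLOB runs the ctor
 * on every allocation.
 */
#if 0
struct foo { int x; };

static void foo_ctor(struct kmem_cache *c, void *obj)
{
        ((struct foo *)obj)->x = 0;     /* initialize each new object */
}

static struct kmem_cache *foo_cache;

static int __init foo_module_init(void)
{
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                      0, SLAB_HWCACHE_ALIGN, foo_ctor);
        return foo_cache ? 0 : -ENOMEM;
}
#endif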

void kmem_cache_destroy(struct kmem_cache *c)
{
        slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
        void *b;

        if (c->size < PAGE_SIZE)
                b = slob_alloc(c->size, flags, c->align, node);
        else
                b = slob_new_page(flags, get_order(c->size), node);

        if (c->ctor)
                c->ctor(c, b);

        return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
        if (size < PAGE_SIZE)
                slob_free(b, size);
        else
                free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
        struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
        void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

        __kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
        if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
                struct slob_rcu *slob_rcu;
                slob_rcu = b + (c->size - sizeof(struct slob_rcu));
                INIT_RCU_HEAD(&slob_rcu->head);
                slob_rcu->size = c->size;
                call_rcu(&slob_rcu->head, kmem_rcu_free);
        } else {
                __kmem_cache_free(b, c->size);
        }
}
EXPORT_SYMBOL(kmem_cache_free);
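
/*
 * Layout sketch for SLAB_DESTROY_BY_RCU caches (editor's note): the
 * room kmem_cache_create() reserved puts a struct slob_rcu footer at
 * the very end of each object:
 *
 *     |<------------------ c->size ------------------>|
 *     |  payload ...         | struct slob_rcu footer |
 *
 * kmem_cache_free() fills in the footer and defers the real release to
 * kmem_rcu_free() after an RCU grace period, so readers still walking
 * the object see valid memory until then.
 */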

unsigned int kmem_cache_size(struct kmem_cache *c)
{
        return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
        return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
        return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
        return 0;
}

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
        return slob_ready;
}

void __init kmem_cache_init(void)
{
        slob_ready = 1;
}