Source code analysis of the kmem_cache_create function


The source code of kmem_cache_create (from the SLAB allocator in mm/slab.c) is as follows:

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * @name must be valid until the cache is destroyed. This implies that
 * the module calling this has to destroy the cache before getting unloaded.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
        unsigned long flags, void (*ctor)(void *))
{
        size_t left_over, slab_size, ralign;
        struct kmem_cache *cachep = NULL, *pc;
        gfp_t gfp;

        /*
         * Sanity checks... these are all serious usage bugs.
         */
        if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
            size > KMALLOC_MAX_SIZE) {
                printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
                                name);
                BUG();
        }

        /*
         * We use cache_chain_mutex to ensure a consistent view of
         * cpu_online_mask as well.  Please see cpuup_callback
         */
        if (slab_is_available()) {
                get_online_cpus();
                mutex_lock(&cache_chain_mutex);
        }

        list_for_each_entry(pc, &cache_chain, next) {
                char tmp;
                int res;

                /*
                 * This happens when the module gets unloaded and doesn't
                 * destroy its slab cache and no-one else reuses the vmalloc
                 * area of the module.  Print a warning.
                 */
                res = probe_kernel_address(pc->name, tmp);
                if (res) {
                        printk(KERN_ERR
                               "SLAB: cache with size %d has lost its name\n",
                               pc->buffer_size);
                        continue;
                }

                if (!strcmp(pc->name, name)) {
                        printk(KERN_ERR
                               "kmem_cache_create: duplicate cache %s\n", name);
                        dump_stack();
                        goto oops;
                }
        }

#if DEBUG
        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
#if FORCED_DEBUG
        /*
         * Enable redzoning and last user accounting, except for caches with
         * large objects, if the increased size would increase the object size
         * above the next power of two: caches with object sizes just above a
         * power of two have a significant amount of internal fragmentation.
         */
        if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
                                                2 * sizeof(unsigned long long)))
                flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
        if (!(flags & SLAB_DESTROY_BY_RCU))
                flags |= SLAB_POISON;
#endif
        if (flags & SLAB_DESTROY_BY_RCU)
                BUG_ON(flags & SLAB_POISON);
#endif
        /*
         * Always checks flags, a caller might be expecting debug support which
         * isn't available.
         */
        BUG_ON(flags & ~CREATE_MASK);

        /*
         * Check that size is in terms of words.  This is needed to avoid
         * unaligned accesses for some archs when redzoning is used, and makes
         * sure any on-slab bufctl's are also correctly aligned.
         */
        if (size & (BYTES_PER_WORD - 1)) {
                size += (BYTES_PER_WORD - 1);
                size &= ~(BYTES_PER_WORD - 1);
        }

        /* calculate the final buffer alignment: */

        /* 1) arch recommendation: can be overridden for debug */
        if (flags & SLAB_HWCACHE_ALIGN) {
                /*
                 * Default alignment: as specified by the arch code.  Except if
                 * an object is really small, then squeeze multiple objects into
                 * one cacheline.
                 */
                ralign = cache_line_size();
                while (size <= ralign / 2)
                        ralign /= 2;
        } else {
                ralign = BYTES_PER_WORD;
        }

        /*
         * Redzoning and user store require word alignment or possibly larger.
         * Note this will be overridden by architecture or caller mandated
         * alignment if either is greater than BYTES_PER_WORD.
         */
        if (flags & SLAB_STORE_USER)
                ralign = BYTES_PER_WORD;

        if (flags & SLAB_RED_ZONE) {
                ralign = REDZONE_ALIGN;
                /* If redzoning, ensure that the second redzone is suitably
                 * aligned, by adjusting the object size accordingly. */
                size += REDZONE_ALIGN - 1;
                size &= ~(REDZONE_ALIGN - 1);
        }

        /* 2) arch mandated alignment */
        if (ralign < ARCH_SLAB_MINALIGN) {
                ralign = ARCH_SLAB_MINALIGN;
        }
        /* 3) caller mandated alignment */
        if (ralign < align) {
                ralign = align;
        }
        /* disable debug if necessary */
        if (ralign > __alignof__(unsigned long long))
                flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
        /*
         * 4) Store it.
         */
        align = ralign;

        if (slab_is_available())
                gfp = GFP_KERNEL;
        else
                gfp = GFP_NOWAIT;

        /* Get cache's description obj. */
        cachep = kmem_cache_zalloc(&cache_cache, gfp);
        if (!cachep)
                goto oops;

#if DEBUG
        cachep->obj_size = size;

        /*
         * Both debugging options require word-alignment which is calculated
         * into align above.
         */
        if (flags & SLAB_RED_ZONE) {
                /* add space for red zone words */
                cachep->obj_offset += sizeof(unsigned long long);
                size += 2 * sizeof(unsigned long long);
        }
        if (flags & SLAB_STORE_USER) {
                /* user store requires one word storage behind the end of
                 * the real object. But if the second red zone needs to be
                 * aligned to 64 bits, we must allow that much space.
                 */
                if (flags & SLAB_RED_ZONE)
                        size += REDZONE_ALIGN;
                else
                        size += BYTES_PER_WORD;
        }
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
        if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
            && cachep->obj_size > cache_line_size()
            && ALIGN(size, align) < PAGE_SIZE) {
                cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
                size = PAGE_SIZE;
        }
#endif
#endif

        /*
         * Determine if the slab management is 'on' or 'off' slab.
         * (bootstrapping cannot cope with offslab caches so don't do
         * it too early on. Always use on-slab management when
         * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
         */
        if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
            !(flags & SLAB_NOLEAKTRACE))
                /*
                 * Size is large, assume best to place the slab management obj
                 * off-slab (should allow better packing of objs).
                 */
                flags |= CFLGS_OFF_SLAB;

        size = ALIGN(size, align);

        left_over = calculate_slab_order(cachep, size, align, flags);

        if (!cachep->num) {
                printk(KERN_ERR
                       "kmem_cache_create: couldn't create cache %s.\n", name);
                kmem_cache_free(&cache_cache, cachep);
                cachep = NULL;
                goto oops;
        }
        slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
                          + sizeof(struct slab), align);

        /*
         * If the slab has been placed off-slab, and we have enough space then
         * move it on-slab. This is at the expense of any extra colouring.
         */
        if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
                flags &= ~CFLGS_OFF_SLAB;
                left_over -= slab_size;
        }

        if (flags & CFLGS_OFF_SLAB) {
                /* really off slab. No need for manual alignment */
                slab_size =
                    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);

#ifdef CONFIG_PAGE_POISONING
                /* If we're going to use the generic kernel_map_pages()
                 * poisoning, then it's going to smash the contents of
                 * the redzone and userword anyhow, so switch them off.
                 */
                if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
                        flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
        }

        cachep->colour_off = cache_line_size();
        /* Offset must be a multiple of the alignment. */
        if (cachep->colour_off < align)
                cachep->colour_off = align;
        cachep->colour = left_over / cachep->colour_off;
        cachep->slab_size = slab_size;
        cachep->flags = flags;
        cachep->gfpflags = 0;
        if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
                cachep->gfpflags |= GFP_DMA;
        cachep->buffer_size = size;
        cachep->reciprocal_buffer_size = reciprocal_value(size);

        if (flags & CFLGS_OFF_SLAB) {
                cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
                /*
                 * This is a possibility for one of the malloc_sizes caches.
                 * But since we go off slab only for object size greater than
                 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
                 * this should not happen at all.
                 * But leave a BUG_ON for some lucky dude.
                 */
                BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
        }
        cachep->ctor = ctor;
        cachep->name = name;

        if (setup_cpu_cache(cachep, gfp)) {
                __kmem_cache_destroy(cachep);
                cachep = NULL;
                goto oops;
        }

        /* cache setup completed, link it into the list */
        list_add(&cachep->next, &cache_chain);
oops:
        if (!cachep && (flags & SLAB_PANIC))
                panic("kmem_cache_create(): failed to create slab `%s'\n",
                      name);
        if (slab_is_available()) {
                mutex_unlock(&cache_chain_mutex);
                put_online_cpus();
        }
        return cachep;
}
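To put the function above in context, here is a minimal usage sketch of the same five-argument API as a kernel module would call it. The module, the struct demo object type, the demo_ctor constructor, and the cache name "demo_cache" are made-up placeholders for illustration; the calls themselves (kmem_cache_create, kmem_cache_alloc, kmem_cache_free, kmem_cache_destroy) are the standard slab cache interface whose create path is shown above.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical object type; any fixed-size structure works. */
struct demo {
        int id;
        char payload[48];
};

static struct kmem_cache *demo_cache;

/* Optional constructor: run on each object when its slab page is allocated. */
static void demo_ctor(void *obj)
{
        struct demo *d = obj;

        d->id = -1;
}

static int __init demo_init(void)
{
        struct demo *d;

        /* Create the cache; SLAB_HWCACHE_ALIGN aligns objects to a cacheline. */
        demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo),
                                       0, SLAB_HWCACHE_ALIGN, demo_ctor);
        if (!demo_cache)
                return -ENOMEM;

        /* Allocate one object from the cache and give it straight back. */
        d = kmem_cache_alloc(demo_cache, GFP_KERNEL);
        if (!d) {
                kmem_cache_destroy(demo_cache);
                return -ENOMEM;
        }
        kmem_cache_free(demo_cache, d);

        return 0;
}

static void __exit demo_exit(void)
{
        /* The cache (and its name string) must outlive all of its objects. */
        kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

As the source shows, kmem_cache_create returns NULL on failure (or panics if SLAB_PANIC was passed in flags), so a caller only needs the NULL check. Note also the word-size round-up near the top of the function: with BYTES_PER_WORD equal to sizeof(void *) (8 on a 64-bit kernel), a requested object size of 13 bytes becomes 16 before any alignment flags are applied.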

