Linux memory management: the three core slab data structures

Slab.c (c:\linux\linux-2.6.23\mm)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */
struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
    struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
    unsigned int batchcount;
    unsigned int limit;
    unsigned int shared;

    unsigned int buffer_size;
    u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */
    unsigned int flags;         /* constant flags */
    unsigned int num;           /* # of objs per slab */

/* 4) cache_grow/shrink */
    /* order of pgs per slab (2^n) */
    unsigned int gfporder;

    /* force GFP flags, e.g. GFP_DMA */
    gfp_t gfpflags;

    size_t colour;              /* cache colouring range */
    unsigned int colour_off;    /* colour offset */
    struct kmem_cache *slabp_cache;
    unsigned int slab_size;
    unsigned int dflags;        /* dynamic flags */

    /* constructor func */
    void (*ctor) (void *, struct kmem_cache *, unsigned long);

/* 5) cache creation/removal */
    const char *name;
    struct list_head next;

/* 6) statistics */
#if STATS
    unsigned long num_active;
    unsigned long num_allocations;
    unsigned long high_mark;
    unsigned long grown;
    unsigned long reaped;
    unsigned long errors;
    unsigned long max_freeable;
    unsigned long node_allocs;
    unsigned long node_frees;
    unsigned long node_overflow;
    atomic_t allochit;
    atomic_t allocmiss;
    atomic_t freehit;
    atomic_t freemiss;
#endif
#if DEBUG
    /*
     * If debugging is enabled, then the allocator can add additional
     * fields and/or padding to every object. buffer_size contains the total
     * object size including these internal fields, the following two
     * variables contain the offset to the user object and its size.
     */
    int obj_offset;
    int obj_size;
#endif
    /*
     * We put nodelists[] at the end of kmem_cache, because we want to size
     * this array to nr_node_ids slots instead of MAX_NUMNODES
     * (see kmem_cache_init())
     * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
     * is statically defined, so we reserve the max number of nodes.
     */
    struct kmem_list3 *nodelists[MAX_NUMNODES];
    /*
     * Do not add fields after nodelists[]
     */
};
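Before looking at the other two structures, it helps to see where struct kmem_cache shows up from a driver's point of view. Below is a minimal, hypothetical sketch (my_obj, my_cachep and my_ctor are illustrative names, not from the kernel source) of creating and using a dedicated cache with the five-argument kmem_cache_create() that matches the 2.6.23 constructor prototype shown above; every kmem_cache_alloc()/kmem_cache_free() first goes through the per-CPU array[] and only falls back to nodelists[] when that per-CPU stack runs empty or overflows.

/* Hypothetical driver-side sketch, assuming the 2.6.23-era slab API. */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct my_obj {                         /* illustrative object type */
    int id;
    char payload[120];
};

static struct kmem_cache *my_cachep;

/* 2.6.23 constructor prototype: (object, cache, flags) */
static void my_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
    memset(obj, 0, sizeof(struct my_obj));  /* run when a new slab is populated */
}

static int my_cache_setup(void)
{
    /* name, object size, alignment, flags, constructor */
    my_cachep = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
                                  0, SLAB_HWCACHE_ALIGN, my_ctor);
    if (!my_cachep)
        return -ENOMEM;
    return 0;
}

static void my_cache_use(void)
{
    struct my_obj *p = kmem_cache_alloc(my_cachep, GFP_KERNEL);
    if (p) {
        p->id = 1;
        kmem_cache_free(my_cachep, p);  /* object returns to the per-CPU array_cache */
    }
}

static void my_cache_teardown(void)
{
    kmem_cache_destroy(my_cachep);
}

Note that later kernels simplified the constructor to void (*ctor)(void *), so this exact signature only applies to trees around 2.6.23.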
Slab.c (c:\linux\linux-2.6.23\mm)

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
    struct list_head slabs_partial; /* partial list first, better asm code */
    struct list_head slabs_full;
    struct list_head slabs_free;
    unsigned long free_objects;
    unsigned int free_limit;
    unsigned int colour_next;       /* Per-node cache coloring */
    spinlock_t list_lock;
    struct array_cache *shared;     /* shared per node */
    struct array_cache **alien;     /* on other nodes */
    unsigned long next_reap;        /* updated without locking */
    int free_touched;               /* updated without locking */
};

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from an general cache.
 * Slabs are chained into three list: fully used, partial, fully free slabs.
 */
struct slab {
    struct list_head list;
    unsigned long colouroff;
    void *s_mem;        /* including colour offset */
    unsigned int inuse; /* num of objs active in slab */
    kmem_bufctl_t free;
    unsigned short nodeid;
};
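The three lists in kmem_list3 are what the allocator walks when the per-CPU array_cache runs dry. The sketch below is a simplified, illustrative rendering of the list handling in cache_alloc_refill() in this same file (pick_slab is a made-up name; locking and bufctl bookkeeping are omitted): a partially used slab is preferred over a completely free one, and when both lists are empty the cache has to grow.

#include <linux/list.h>

/* Simplified sketch of how a slab is chosen from a node's kmem_list3. */
static struct slab *pick_slab(struct kmem_list3 *l3)
{
    struct list_head *entry;
    struct slab *slabp;

    entry = l3->slabs_partial.next;
    if (entry == &l3->slabs_partial) {      /* no partial slabs */
        l3->free_touched = 1;
        entry = l3->slabs_free.next;
        if (entry == &l3->slabs_free)       /* no free slabs either */
            return NULL;                    /* caller must grow the cache */
    }
    slabp = list_entry(entry, struct slab, list);

    /*
     * After objects are handed out, the slab is re-queued on the list that
     * matches its new state: slabs_full when no free object remains,
     * otherwise back onto slabs_partial.
     */
    return slabp;
}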

Initialization

At boot, kmem_cache_init() wires the three structures together by hand, because at this point the allocator cannot yet allocate from itself:

kmem_cache_init
    =>for (i = 0; i < NUM_INIT_LISTS; i++) {   // initialize the static kmem_list3 entries
          kmem_list3_init(&initkmem_list3[i]);
          if (i < MAX_NUMNODES)
              cache_cache.nodelists[i] = NULL;
      }
    =>INIT_LIST_HEAD(&cache_chain);            // initialize the fields of cache_cache, the kmem_cache of kmem_caches
      list_add(&cache_cache.next, &cache_chain);
      cache_cache.colour_off = cache_line_size();
      cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
      cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
    =>sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,   // allocate the general (kmalloc) caches
                    sizes[INDEX_AC].cs_size,
                    ARCH_KMALLOC_MINALIGN,
                    ARCH_KMALLOC_FLAGS|SLAB_PANIC,
                    NULL);
      if (!sizes->cs_cachep) {
              sizes->cs_cachep = kmem_cache_create(names->name,
                      sizes->cs_size,
                      ARCH_KMALLOC_MINALIGN,
                      ARCH_KMALLOC_FLAGS|SLAB_PANIC,
                      NULL);
      }
#ifdef CONFIG_ZONE_DMA
      sizes->cs_dmacachep = kmem_cache_create(
                      names->name_dma,
                      sizes->cs_size,
                      ARCH_KMALLOC_MINALIGN,
                      ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
                          SLAB_PANIC,
                      NULL);
#endif
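For context on the sizes[] table used above: in 2.6.23 it points at the static malloc_sizes[] array of struct cache_sizes entries, one per kmalloc size class, and the sizes[INDEX_AC] cache is created first because the allocator needs it to allocate its own struct arraycache_init objects. The sketch below, adapted and simplified from struct cache_sizes in slab_def.h and __find_general_cachep() in mm/slab.c, shows how kmalloc() maps a request size onto one of these general caches.

struct cache_sizes {
    size_t             cs_size;
    struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
    struct kmem_cache *cs_dmacachep;
#endif
};

extern struct cache_sizes malloc_sizes[];

/* Simplified lookup: walk the table until the first cache large enough. */
static struct kmem_cache *find_general_cachep(size_t size, gfp_t gfpflags)
{
    struct cache_sizes *csizep = malloc_sizes;

    while (size > csizep->cs_size)
        csizep++;
#ifdef CONFIG_ZONE_DMA
    if (unlikely(gfpflags & GFP_DMA))
        return csizep->cs_dmacachep;    /* the DMA twin created above */
#endif
    return csizep->cs_cachep;
}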