/*
* Called with the &cachep->spinlock held, returns number of slabs released
 */
static int __kmem_cache_shrink_locked(kmem_cache_t *cachep)
{
slab_t *slabp;
int ret = 0;
/* If the cache is growing, stop shrinking. */
while (!cachep->growing) {
struct list_head *p;
p = cachep->slabs_free.prev;
if (p == &cachep->slabs_free)
break;
slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
#if DEBUG
if (slabp->inuse)
BUG();
#endif
list_del(&slabp->list); /* unlink the last slab on the free list */
spin_unlock_irq(&cachep->spinlock); /* drop the lock while the slab is destroyed */
kmem_slab_destroy(cachep, slabp); /* return the slab's pages to the system */
ret++;
spin_lock_irq(&cachep->spinlock); /* relock before the next pass */
}
return ret;
}
static int __kmem_cache_shrink(kmem_cache_t *cachep) /* the shrink operation proper */
{
int ret;
drain_cpu_caches(cachep);
spin_lock_irq(&cachep->spinlock); /* take the spinlock */
__kmem_cache_shrink_locked(cachep); /* free the empty slabs */
ret = !list_empty(&cachep->slabs_full) ||
!list_empty(&cachep->slabs_partial); /* nonzero if full or partial slabs remain, i.e. the cache is not empty */
spin_unlock_irq(&cachep->spinlock); /* release the spinlock */
return ret;
}
/**
* kmem_cache_shrink - Shrink a cache.
* @cachep: The cache to shrink.
*
* Releases as many slabs as possible for a cache.
* Returns number of pages released.
 */
int kmem_cache_shrink(kmem_cache_t *cachep)
{
int ret;
if (!cachep || in_interrupt() || !is_chained_kmem_cache(cachep)) /* BUG on a NULL cache, in interrupt context, or a cache that is not on the cache chain */
BUG();
spin_lock_irq(&cachep->spinlock); /* lock, disabling interrupts */
ret = __kmem_cache_shrink_locked(cachep); /* do the shrink */
spin_unlock_irq(&cachep->spinlock); /* unlock, re-enabling interrupts */
return ret << cachep->gfporder; /* convert freed slabs to freed pages */
}
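A note on the return value: __kmem_cache_shrink_locked() counts slabs, but the exported API reports pages, hence the shift by cachep->gfporder. A minimal standalone sketch of that conversion (the gfporder and slab count are assumed example values, not taken from the kernel):

#include <stdio.h>

int main(void)
{
	int freed_slabs = 3;	/* e.g. __kmem_cache_shrink_locked() returned 3 */
	int gfporder = 2;	/* each slab is 2^2 = 4 pages */

	/* ret << gfporder multiplies slabs by pages-per-slab */
	printf("%d slabs * %d pages/slab = %d pages released\n",
	       freed_slabs, 1 << gfporder, freed_slabs << gfporder);
	return 0;
}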
/**
 * kmem_cache_destroy - delete a cache
 * @cachep: the cache to destroy
 *
 * Remove a kmem_cache_t object from the slab cache.
 * Returns 0 on success.
 *
 * It is expected this function will be called by a module when it is
 * unloaded. This will remove the cache completely, and avoid a duplicate
 * cache being allocated each time a module is loaded and unloaded, if the
 * module doesn't have persistent in-kernel storage across loads and unloads.
 *
 * The cache must be empty before calling this function.
 *
 * The caller must guarantee that noone will allocate memory from the cache
 * during the kmem_cache_destroy().
 */
int kmem_cache_destroy (kmem_cache_t * cachep)
{
if (!cachep || in_interrupt() || cachep->growing) /* BUG on a NULL cache, in interrupt context, or a cache that is still growing */
BUG();
/* Find the cache in the chain of caches. */
down(&cache_chain_sem); /* take the cache-chain semaphore */
/* the chain is never empty, cache_cache is never destroyed */
if (clock_searchp == cachep) /* the reap clock hand points at this cache: advance it to the next one */
clock_searchp = list_entry(cachep->next.next,
kmem_cache_t, next);
list_del(&cachep->next);
up(&cache_chain_sem);
if (__kmem_cache_shrink(cachep)) { /* the cache could not be emptied (objects remain): re-chain it and fail */
printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
cachep);
down(&cache_chain_sem);
list_add(&cachep->next,&cache_chain);
up(&cache_chain_sem);
return 1;
}
#ifdef CONFIG_SMP /* SMP only: free each CPU's local cache */
{
int i;
for (i = 0; i < NR_CPUS; i++)
kfree(cachep->cpudata[i]); /* free every CPU's cpucache */
}
#endif
kmem_cache_free(&cache_cache, cachep); /* finally free the descriptor itself back into cache_cache */
return 0;
}
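The doc comment above expects kmem_cache_destroy() to be paired with kmem_cache_create() in a module's load/unload path. A hedged sketch of that usage pattern for a 2.4-era module; the names my_cache, struct my_obj and the init/exit functions are hypothetical:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct my_obj { int data; };		/* hypothetical object type */
static kmem_cache_t *my_cache;

static int __init my_module_init(void)
{
	/* 2.4 signature: name, size, offset, flags, ctor, dtor */
	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
				     0, 0, NULL, NULL);
	if (!my_cache)
		return -ENOMEM;
	return 0;
}

static void __exit my_module_exit(void)
{
	/* every object must already be freed, and nothing may allocate
	 * from the cache while this runs */
	if (kmem_cache_destroy(my_cache))
		printk(KERN_ERR "my_cache: could not destroy, objects remain\n");
}

module_init(my_module_init);
module_exit(my_module_exit);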
/* Get the memory for a slab management obj. */
static inline slab_t * kmem_cache_slabmgmt (kmem_cache_t *cachep,
void *objp, int colour_off, int local_flags)
{
slab_t *slabp;
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab: allocate the descriptor from a separate cache. */
slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
if (!slabp) /* allocation failed */
return NULL;
} else { /* on-slab: the descriptor lives at the start of the slab itself */
/* FIXME: change to
slabp = objp
* if you enable OPTIMIZE
*/
slabp = objp+colour_off; /* the descriptor sits just past the colouring padding */
colour_off += L1_CACHE_ALIGN(cachep->num *
sizeof(kmem_bufctl_t) + sizeof(slab_t)); /* advance past the descriptor and bufctl array, L1-aligned */
}
slabp->inuse = 0; /* no live objects yet */
slabp->colouroff = colour_off; /* offset of the first object from the slab base */
slabp->s_mem = objp+colour_off; /* address of the first object in the slab */
return slabp;
}
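To make the on-slab arithmetic concrete, here is a standalone user-space sketch of the layout it produces: colour padding, then the slab_t descriptor and bufctl array (L1-aligned), then the objects. All sizes are assumed example values, not the kernel's:

#include <stdio.h>

#define L1_CACHE_BYTES 32
#define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

int main(void)
{
	unsigned long slab_t_size = 32;		/* assumed sizeof(slab_t) */
	unsigned long bufctl_size = 4;		/* assumed sizeof(kmem_bufctl_t) */
	unsigned long num = 10;			/* objects per slab */
	unsigned long colour_off = 32;		/* colour padding for this slab */

	/* mirrors: colour_off += L1_CACHE_ALIGN(num*bufctl + slab_t) */
	unsigned long mgmt = L1_CACHE_ALIGN(num * bufctl_size + slab_t_size);

	printf("slab_t at offset %lu, first object (s_mem) at offset %lu\n",
	       colour_off, colour_off + mgmt);
	return 0;
}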
static inline void kmem_cache_init_objs (kmem_cache_t * cachep,
slab_t * slabp, unsigned long ctor_flags)
{
int i;
for (i = 0; i < cachep->num; i++) { /* run the constructor on every object in the slab */
void* objp = slabp->s_mem+cachep->objsize*i;
#if DEBUG
if (cachep->flags & SLAB_RED_ZONE) {
*((unsigned long*)(objp)) = RED_MAGIC1;
*((unsigned long*)(objp + cachep->objsize -
BYTES_PER_WORD)) = RED_MAGIC1;
objp += BYTES_PER_WORD;
}
#endif
/*
* Constructors are not allowed to allocate memory from
* the same cache which they are a constructor for.
* Otherwise, deadlock. They must also be threaded.
 */
if (cachep->ctor)
cachep->ctor(objp, cachep, ctor_flags);
#if DEBUG
if (cachep->flags & SLAB_RED_ZONE)
objp -= BYTES_PER_WORD;
if (cachep->flags & SLAB_POISON)
/* need to poison the objs */
kmem_poison_obj(cachep, objp);
if (cachep->flags & SLAB_RED_ZONE) {
if (*((unsigned long*)(objp)) != RED_MAGIC1)
BUG();
if (*((unsigned long*)(objp + cachep->objsize -
BYTES_PER_WORD)) != RED_MAGIC1)
BUG();
}
#endif
slab_bufctl(slabp)[i] = i+1; /* each bufctl entry holds the index of the next free object */
}
slab_bufctl(slabp)[i-1] = BUFCTL_END; /* terminate the chain (0xffffFFFF) */
slabp->free = 0; /* the first free object is index 0 */
}
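The bufctl array built above is effectively a singly linked free list of object indices. A standalone user-space model (BUFCTL_END taken from the annotation above; the array size is an assumed example), showing how allocation pops the head exactly as kmem_cache_alloc_one_tail() does later:

#include <stdio.h>

#define NUM        5
#define BUFCTL_END 0xffffFFFF

static unsigned int bufctl[NUM];
static unsigned int free_head;

int main(void)
{
	unsigned int i;

	/* mirror kmem_cache_init_objs(): bufctl[i] = i+1, last = BUFCTL_END */
	for (i = 0; i < NUM; i++)
		bufctl[i] = i + 1;
	bufctl[NUM - 1] = BUFCTL_END;
	free_head = 0;

	/* allocate two objects: pop the list head each time */
	for (i = 0; i < 2; i++) {
		unsigned int obj = free_head;
		free_head = bufctl[obj];
		printf("allocated object %u, next free is %u\n", obj, free_head);
	}
	return 0;
}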
/*
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
 */
static int kmem_cache_grow (kmem_cache_t * cachep, int flags)
{
slab_t *slabp;
struct page *page;
void *objp;
size_t offset;
unsigned int i, local_flags;
unsigned long ctor_flags;
unsigned long save_flags;
/* Be lazy and only check for valid flags here,
* keeping it out of the critical path in kmem_cache_alloc().
 */
if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
BUG();
if (flags & SLAB_NO_GROW) /* growing this cache is forbidden: just return */
return 0;
/*
* The test for missing atomic flag is performed here, rather than
* the more obvious place, simply to reduce the critical path length
* in kmem_cache_alloc(). If a caller is seriously mis-behaving they
* will eventually be caught here (where it matters).
 */
if (in_interrupt() && (flags & SLAB_LEVEL_MASK) != SLAB_ATOMIC) /* callers in interrupt context must pass SLAB_ATOMIC */
BUG();
ctor_flags = SLAB_CTOR_CONSTRUCTOR;
local_flags = (flags & SLAB_LEVEL_MASK);
if (local_flags == SLAB_ATOMIC)
/*
* Not allowed to sleep. Need to tell a constructor about
* this - it might need to know...
 */
ctor_flags |= SLAB_CTOR_ATOMIC; /* tell the constructor it must not sleep */
/* About to mess with non-constant members - lock. */
spin_lock_irqsave(&cachep->spinlock, save_flags);
/* Get colour for the slab, and calc the next value. */
offset = cachep->colour_next;
cachep->colour_next++;
if (cachep->colour_next >= cachep->colour) /* wrapped past the last colour */
cachep->colour_next = 0; /* start again at colour 0 */
offset *= cachep->colour_off; /* byte offset = colour index * colour granularity */
cachep->dflags |= DFLGS_GROWN; /* mark the cache as recently grown */
cachep->growing++; /* a grow is now in progress */
spin_unlock_irqrestore(&cachep->spinlock, save_flags); /* unlock, restoring the interrupt state */
/* A series of memory allocations for a new slab.
* Neither the cache-chain semaphore, or cache-lock, are
* held, but the incrementing c_growing prevents this
* cache from being reaped or shrunk.
* Note: The cache could be selected in for reaping in
* kmem_cache_reap(), but when the final test is made the
* growing value will be seen.
 */
/* Get mem for the objs. */
if (!(objp = kmem_getpages(cachep, flags))) /* allocate pages for the objects */
goto failed;
/* Get slab management. */
if (!(slabp = kmem_cache_slabmgmt(cachep, objp, offset, local_flags))) /* allocate the slab descriptor */
goto opps1; /* failed: free the pages again */
/* Nasty!!!!!! I hope this is OK. */
i = 1 << cachep->gfporder; /* i = number of pages in the slab */
page = virt_to_page(objp); /* page points at the first page of the slab */
do {
SET_PAGE_CACHE(page, cachep); /* record the owning cache: page->list.next = (struct list_head *)cachep */
SET_PAGE_SLAB(page, slabp); /* record the owning slab: page->list.prev = (struct list_head *)slabp */
PageSetSlab(page); /* mark the page as belonging to a slab */
page++; /* next page */
} while (--i); /* until every page of the slab is tagged */
kmem_cache_init_objs(cachep, slabp, ctor_flags); /* construct the objects */
spin_lock_irqsave(&cachep->spinlock, save_flags); /* lock, saving the interrupt state */
cachep->growing--; /* the grow is finished */
/* Make slab active. */
list_add_tail(&slabp->list, &cachep->slabs_free); /* put the new slab at the tail of the free list */
STATS_INC_GROWN(cachep); /* statistics: one more grow */
cachep->failures = 0; /* reset the failure counter */
spin_unlock_irqrestore(&cachep->spinlock, save_flags); /* unlock, restoring interrupts */
return 1; /* success */
opps1:
kmem_freepages(cachep, objp); /* give the pages back */
failed:
spin_lock_irqsave(&cachep->spinlock, save_flags);
cachep->growing--; /* the grow attempt is over */
spin_unlock_irqrestore(&cachep->spinlock, save_flags);
return 0;
}
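Slab colouring, as computed at the top of kmem_cache_grow(), staggers the start of successive slabs so that the same object index in different slabs lands on different cache lines. A standalone sketch with assumed colour/colour_off values:

#include <stdio.h>

int main(void)
{
	unsigned int colour = 4;	/* assumed number of distinct colours */
	unsigned int colour_off = 32;	/* assumed colour granularity (L1 line) */
	unsigned int colour_next = 0, slab;

	for (slab = 0; slab < 6; slab++) {
		unsigned int offset = colour_next * colour_off;
		if (++colour_next >= colour)
			colour_next = 0;	/* wrap, as in kmem_cache_grow() */
		printf("slab %u: colour offset %u bytes\n", slab, offset);
	}
	return 0;
}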
/*
* Perform extra freeing checks:
* - detect double free
* - detect bad pointers.
* Called with the cache-lock held.
 */
#if DEBUG
static int kmem_extra_free_checks (kmem_cache_t * cachep,
slab_t *slabp, void * objp)
{
int i;
unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
if (objnr >= cachep->num)
BUG();
if (objp != slabp->s_mem + objnr*cachep->objsize)
BUG();
/* Check slab's freelist to see if this obj is there. */
for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { /* if the object is already on the free list, this is a double free */
if (i == objnr)
BUG();
}
return 0;
}
#endif
static inline void kmem_cache_alloc_head(kmem_cache_t *cachep, int flags)
{
if (flags & SLAB_DMA) {
if (!(cachep->gfpflags & GFP_DMA)) /* caller asked for DMA memory but the cache does not allocate DMA pages */
BUG();
} else {
if (cachep->gfpflags & GFP_DMA) /* caller asked for normal memory but the cache allocates DMA pages */
BUG();
}
}
static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
slab_t *slabp)
{
void *objp;
STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep); /* statistics (no-ops unless STATS is 1) */
/* get obj pointer */
slabp->inuse++; /* one more active object in this slab */
objp = slabp->s_mem + slabp->free*cachep->objsize; /* address of the first free object: base + index * object size */
slabp->free=slab_bufctl(slabp)[slabp->free]; /* advance the free-list head to the next free object's index */
if (unlikely(slabp->free == BUFCTL_END)) { /* the slab is now full: move it to the full list */
list_del(&slabp->list); /* off its current list */
list_add(&slabp->list, &cachep->slabs_full); /* onto the full list */
}
}
#if DEBUG
if (cachep->flags & SLAB_POISON) /* poisoning enabled */
if (kmem_check_poison_obj(cachep, objp)) /* the poison pattern was overwritten while the object was free: BUG */
BUG();
if (cachep->flags & SLAB_RED_ZONE) { /* red-zoning enabled */
/* Set alloc red-zone, and check old one. */
if (xchg((unsigned long *)objp, RED_MAGIC2) != /* leading red zone corrupted */
RED_MAGIC1)
BUG();
if (xchg((unsigned long *)(objp+cachep->objsize -
BYTES_PER_WORD), RED_MAGIC2) != RED_MAGIC1) /* trailing red zone corrupted */
BUG();
objp += BYTES_PER_WORD;
}
#endif
return objp; /* return the freshly allocated object */
}
/*
* Returns a ptr to an obj in the given cache.
* caller must guarantee synchronization
* #define for the goto optimization 8-)
 */
#define kmem_cache_alloc_one(cachep) \
({ \
struct list_head * slabs_partial, * entry; \
slab_t *slabp; \
\
slabs_partial = &(cachep)->slabs_partial; \
entry = slabs_partial->next; \
if (unlikely(entry == slabs_partial)) { \
struct list_head * slabs_free; \
slabs_free = &(cachep)->slabs_free; \
entry = slabs_free->next; \
if (unlikely(entry == slabs_free)) \
goto alloc_new_slab; \
list_del(entry); \
list_add(entry, slabs_partial); \
} \
\
slabp = list_entry(entry, slab_t, list); \
kmem_cache_alloc_one_tail(cachep, slabp); \
})
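kmem_cache_alloc_one() is a GCC statement expression: the macro body is a block whose last expression becomes the macro's value, and using a #define (rather than a function) lets `goto alloc_new_slab` jump out into the enclosing function. A minimal standalone demo of the ({ ... }) construct itself (max3 is just an illustrative macro, not kernel code):

#include <stdio.h>

#define max3(a, b, c)			\
({					\
	int _m = (a);			\
	if ((b) > _m) _m = (b);	\
	if ((c) > _m) _m = (c);	\
	_m; /* value of the whole ({ ... }) expression */ \
})

int main(void)
{
	printf("max3 = %d\n", max3(3, 7, 5));
	return 0;
}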
#ifdef CONFIG_SMP /* SMP support: refill the per-CPU cache with a batch of objects */
void* kmem_cache_alloc_batch(kmem_cache_t* cachep, cpucache_t* cc, int flags)
{
int batchcount = cachep->batchcount;
spin_lock(&cachep->spinlock);
while (batchcount--) {
struct list_head * slabs_partial, * entry;
slab_t *slabp;
/* Get slab alloc is to come from. */
slabs_partial = &(cachep)->slabs_partial;
entry = slabs_partial->next;
if (unlikely(entry == slabs_partial)) {
struct list_head * slabs_free;
slabs_free = &(cachep)->slabs_free;
entry = slabs_free->next;
if (unlikely(entry == slabs_free))
break;
list_del(entry);
list_add(entry, slabs_partial);
}
slabp = list_entry(entry, slab_t, list);
cc_entry(cc)[cc->avail++] =
kmem_cache_alloc_one_tail(cachep, slabp);
}
spin_unlock(&cachep->spinlock);
if (cc->avail)
return cc_entry(cc)[--cc->avail];
return NULL;
}
#endif
static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
{
unsigned long save_flags;
void* objp;
kmem_cache_alloc_head(cachep, flags); /* sanity-check the request */
try_again:
local_irq_save(save_flags); /* disable interrupts, saving their state */
#ifdef CONFIG_SMP /* SMP case */
{
cpucache_t *cc = cc_data(cachep); /* this CPU's local cache for cachep */
if (cc) { /* a per-CPU cache exists */
if (cc->avail) { /* it holds a free object */
STATS_INC_ALLOCHIT(cachep); /* count the hit */
objp = cc_entry(cc)[--cc->avail]; /* pop an object from the per-CPU array */
} else { /* the per-CPU cache is empty */
STATS_INC_ALLOCMISS(cachep); /* count the miss */
objp = kmem_cache_alloc_batch(cachep,cc,flags); /* refill it with a batch and take one object */
if (!objp)
goto alloc_new_slab_nolock; /* nothing to batch: grow the cache (no lock held) */
}
} else { /* no per-CPU cache */
spin_lock(&cachep->spinlock); /* lock */
objp = kmem_cache_alloc_one(cachep); /* allocate straight from the slab lists */
spin_unlock(&cachep->spinlock); /* unlock */
}
}
#else
objp = kmem_cache_alloc_one(cachep); /* UP: allocate one object directly */
#endif
local_irq_restore(save_flags); /* restore interrupts */
return objp; /* return the allocated object */
alloc_new_slab:
#ifdef CONFIG_SMP /* on SMP we reach this label with the spinlock held: release it */
spin_unlock(&cachep->spinlock);
alloc_new_slab_nolock:
#endif
local_irq_restore(save_flags); /* restore interrupts */
if (kmem_cache_grow(cachep, flags)) /* a new slab was successfully added to the cache */
/* Someone may have stolen our objs. Doesn't matter, we'll
 * just come back here again.
 */
goto try_again; /* retry the allocation */
return NULL; /* could not allocate and could not grow: return NULL */
}
/*
* Release an obj back to its cache. If the obj has a constructed
* state, it should be in this state _before_ it is released.
* - caller is responsible for the synchronization
*/
#if DEBUG
# define CHECK_NR(pg) \
do { \
if (!VALID_PAGE(pg)) { \
printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
} \
} while (0)
# define CHECK_PAGE(page) \
do { \
CHECK_NR(page); \
if (!PageSlab(page)) { \
printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
} \
} while (0)
#else
# define CHECK_PAGE(pg) do { } while (0)
#endif
The do { } while (0) wrapper guarantees that the macro expands to a single statement and keeps the same semantics in every context it is used (see the example below).
Reference: http://www.rtems.com/rtems/maillistArchives/rtems-users/2001/august/msg00056.html
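A minimal demonstration of the point: a bare-brace macro breaks when used as the body of an if/else, while the do { } while (0) form swallows the trailing semicolon and behaves as one statement (CHECK_BAD/CHECK_GOOD are illustrative names):

#include <stdio.h>

#define CHECK_BAD(x)  { printf("checking %d\n", (x)); }		/* unsafe */
#define CHECK_GOOD(x) do { printf("checking %d\n", (x)); } while (0)

int main(void)
{
	int v = 1;

	/* `if (v) CHECK_BAD(v); else ...` would not compile: the `;`
	 * after `}` terminates the if before the else is reached. */
	if (v)
		CHECK_GOOD(v);
	else
		printf("zero\n");
	return 0;
}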
static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
{
slab_t* slabp;
CHECK_PAGE(virt_to_page(objp)); /* validate the page the object lives in */
/* reduces memory footprint
*
if (OPTIMIZE(cachep))
slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
else
*/
slabp = GET_PAGE_SLAB(virt_to_page(objp)); /* look up the slab this object belongs to */
#if DEBUG
if (cachep->flags & SLAB_DEBUG_INITIAL)
/* Need to call the slab's constructor so the
* caller can perform a verify of its state (debugging).
* Called without the cache-lock held.
*/
cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
if (cachep->flags & SLAB_RED_ZONE) {
objp -= BYTES_PER_WORD;
if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
/* Either write before start, or a double free. */
BUG();
if (xchg((unsigned long *)(objp+cachep->objsize -
BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
/* Either write past end, or a double free. */
BUG();
}
if (cachep->flags & SLAB_POISON)
kmem_poison_obj(cachep, objp);
if (kmem_extra_free_checks(cachep, slabp, objp))
return;
#endif
{
unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize; /* index of the object within its slab */
slab_bufctl(slabp)[objnr] = slabp->free; /* chain the old free-list head behind this object */
slabp->free = objnr; /* the freed object becomes the new head of the free list */
}
STATS_DEC_ACTIVE(cachep); /* one live object fewer */
/* fixup slab chains */
{
int inuse = slabp->inuse;
if (unlikely(!--slabp->inuse)) {
/* Was partial or full, now empty: move it to the free list */
list_del(&slabp->list);
list_add(&slabp->list, &cachep->slabs_free);
} else if (unlikely(inuse == cachep->num)) {
/* Was full, now partial: move it to the partial list */
list_del(&slabp->list);
list_add(&slabp->list, &cachep->slabs_partial);
}
}
}
#ifdef CONFIG_SMP /* SMP helpers for freeing a block of objects */
static inline void __free_block (kmem_cache_t* cachep,
void** objpp, int len)
{
for ( ; len > 0; len--, objpp++)
kmem_cache_free_one(cachep, *objpp); /* free the objects one by one until the block is empty */
}
static void free_block (kmem_cache_t* cachep, void** objpp, int len)
{
spin_lock(&cachep->spinlock);
__free_block(cachep, objpp, len); /* lock, free the block, unlock */
spin_unlock(&cachep->spinlock);
}
#endif
/*
* __kmem_cache_free
* called with disabled ints
*/
static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
{
#ifdef CONFIG_SMP /* SMP: free via this CPU's local cache (the STATS counters below are no-ops unless STATS is 1) */
cpucache_t *cc = cc_data(cachep);
CHECK_PAGE(virt_to_page(objp));
if (cc) {
int batchcount;
if (cc->avail < cc->limit) { /* room left in the per-CPU array: just park the pointer */
STATS_INC_FREEHIT(cachep);
cc_entry(cc)[cc->avail++] = objp;
return;
}
STATS_INC_FREEMISS(cachep);
batchcount = cachep->batchcount;
cc->avail -= batchcount;
free_block(cachep,
&cc_entry(cc)[cc->avail],batchcount);
cc_entry(cc)[cc->avail++] = objp;
return;
} else {
free_block(cachep, &objp, 1);
}
#else
kmem_cache_free_one(cachep, objp); /* UP: free directly to the slab lists */
#endif
}
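A standalone user-space model of the SMP free path above: pointers are parked in a small per-CPU array until it reaches its limit, then a batch is flushed back to the shared slab lists and the new pointer takes a freed slot. LIMIT, BATCHCOUNT and flush_block() are stand-ins for cc->limit, cachep->batchcount and free_block():

#include <stdio.h>

#define LIMIT      4
#define BATCHCOUNT 2

static void *entries[LIMIT];
static int avail;

static void flush_block(void **objpp, int len)
{
	/* stands in for free_block(): give objects back to the slab lists */
	printf("flushing %d objects to the shared lists\n", len);
}

static void cached_free(void *objp)
{
	if (avail < LIMIT) {		/* fast path: FREEHIT */
		entries[avail++] = objp;
		return;
	}
	/* slow path: FREEMISS - drain a batch, then cache this pointer */
	avail -= BATCHCOUNT;
	flush_block(&entries[avail], BATCHCOUNT);
	entries[avail++] = objp;
}

int main(void)
{
	int dummy[6], i;

	for (i = 0; i < 6; i++)
		cached_free(&dummy[i]);
	printf("%d pointers still cached\n", avail);
	return 0;
}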
/**
* kmem_cache_alloc - Allocate an object
* @cachep: The cache to allocate from.
* @flags: See kmalloc().
*
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
 */
void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
{
return __kmem_cache_alloc(cachep, flags);
}
/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
 *
 * Additionally, the %GFP_DMA flag may be set to indicate the memory
 * must be suitable for DMA. This can mean different things on different
 * platforms. For example, on i386, it means that the memory must come
 * from the first 16MB.
 */
void * kmalloc (size_t size, int flags)
{
cache_sizes_t *csizep = cache_sizes;
for (; csizep->cs_size; csizep++) {
if (size > csizep->cs_size) /* walk the size table until the first class big enough */
continue;
return __kmem_cache_alloc(flags & GFP_DMA ? /* pick the DMA or the normal cache for this size */
csizep->cs_dmacachep : csizep->cs_cachep, flags);
}
return NULL; /* larger than any size class: fail */
}
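The loop in kmalloc() is a first-fit walk over an ascending size table. A standalone sketch; the sizes are assumptions modelled on the 2.4 cache_sizes table, not taken from it:

#include <stdio.h>

static const unsigned int cs_sizes[] = { 32, 64, 128, 256, 512, 1024,
					 2048, 4096, 8192, 16384, 0 };

static unsigned int pick_class(unsigned int size)
{
	const unsigned int *p;

	for (p = cs_sizes; *p; p++) {
		if (size > *p)
			continue;	/* class too small, try the next one */
		return *p;		/* first class large enough */
	}
	return 0;			/* too big for any class */
}

int main(void)
{
	printf("kmalloc(100) -> size-%u cache\n", pick_class(100));
	printf("kmalloc(513) -> size-%u cache\n", pick_class(513));
	return 0;
}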
/**
* kmem_cache_free - Deallocate an object
* @cachep: The cache the allocation was from.
* @objp: The previously allocated object.
*
* Free an object which was previously allocated from this
* cache.
 */
void kmem_cache_free (kmem_cache_t *cachep, void *objp)
{
unsigned long flags;
#if DEBUG
CHECK_PAGE(virt_to_page(objp));
if (cachep != GET_PAGE_CACHE(virt_to_page(objp))) /* the object was not allocated from this cache: BUG */
BUG();
#endif
local_irq_save(flags);
__kmem_cache_free(cachep, objp); /* disable interrupts, free, then restore interrupts */
local_irq_restore(flags);
}
/**
* kfree - free previously allocated memory
* @objp: pointer returned by kmalloc.
*
* Don't free memory not originally allocated by kmalloc()
* or you will run into trouble.
 */
void kfree (const void *objp)
{
kmem_cache_t *c;
unsigned long flags;
if (!objp) /* freeing NULL is a no-op */
return;
local_irq_save(flags); /* disable interrupts */
CHECK_PAGE(virt_to_page(objp)); /* validate the object's page */
c = GET_PAGE_CACHE(virt_to_page(objp)); /* c = the cache the object was allocated from */
__kmem_cache_free(c, (void*)objp); /* free the object */
local_irq_restore(flags); /* restore interrupts */
}
unsigned int kmem_cache_size(kmem_cache_t *cachep)
{
#if DEBUG
if (cachep->flags & SLAB_RED_ZONE)
return (cachep->objsize - 2*BYTES_PER_WORD); /* red-zoning reserves one word at each end of the object */
#endif
return cachep->objsize; /* usable object size in this cache */
}
kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
{ /* find the general-purpose (sized) cache for a given allocation size */
cache_sizes_t *csizep = cache_sizes;
/* This function could be moved to the header file, and
* made inline so consumers can quickly determine what
* cache pointer they require.
 */
for ( ; csizep->cs_size; csizep++) {
if (size > csizep->cs_size)
continue;
break;
}
return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep; /* return the DMA or the normal cache, as requested */
}
#ifdef CONFIG_SMP /* SMP support */
/* called with cache_chain_sem acquired. */
static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
{ /* retune the per-CPU caches of one cache */
ccupdate_struct_t new;
int i;
/*
* These are admin-provided, so we are more graceful.
*/
if (limit < 0)
return -EINVAL;
if (batchcount < 0)
return -EINVAL;
if (batchcount > limit)
return -EINVAL;
if (limit != 0 && !batchcount)
return -EINVAL;
memset(&new.new,0,sizeof(new.new)); /* zero the update structure */
if (limit) {
for (i = 0; i < smp_num_cpus; i++) {
cpucache_t* ccnew;
ccnew = kmalloc(sizeof(void*)*limit +
sizeof(cpucache_t), GFP_KERNEL);
if (!ccnew)
goto oom;
ccnew->limit = limit; /* capacity of the new per-CPU array */
ccnew->avail = 0; /* it starts empty */
new.new[cpu_logical_map(i)] = ccnew; /* slot it in by logical CPU number */
}
}
new.cachep = cachep; /* the cache being updated */
spin_lock_irq(&cachep->spinlock); /* lock */
cachep->batchcount = batchcount; /* record the new batch size */
spin_unlock_irq(&cachep->spinlock); /* unlock */
smp_call_function_all_cpus(do_ccupdate_local, (void *)&new); /* every CPU swaps in its new cpucache */
for (i = 0; i < smp_num_cpus; i++) {
cpucache_t* ccold = new.new[cpu_logical_map(i)];
if (!ccold)
continue;
local_irq_disable();
free_block(cachep, cc_entry(ccold), ccold->avail); /* drain whatever the old cpucache still held */
local_irq_enable();
kfree(ccold);
}
return 0;
oom:
for (i--; i >= 0; i--)
kfree(new.new[cpu_logical_map(i)]); /* undo the allocations made so far */
return -ENOMEM; /* out of memory */
}
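The interesting part of kmem_tune_cpucache() is the handshake with do_ccupdate_local() (not shown in this excerpt): each CPU swaps its live cpucache pointer with the new one inside the cross-call, so the old caches come back in the same array for draining. A standalone model of that swap under those assumptions, with simplified structures and hypothetical names:

#include <stdio.h>

#define NCPUS 2

typedef struct { int limit, avail; } cpucache_t;

static cpucache_t *cpu_data[NCPUS];	/* per-CPU cache pointers (start NULL) */

/* stands in for do_ccupdate_local(), run once per CPU */
static void ccupdate_local(cpucache_t **new_tab, int cpu)
{
	cpucache_t *old = cpu_data[cpu];
	cpu_data[cpu] = new_tab[cpu];
	new_tab[cpu] = old;		/* hand the old cache back */
}

int main(void)
{
	static cpucache_t fresh[NCPUS] = { { 8, 0 }, { 8, 0 } };
	cpucache_t *new_tab[NCPUS] = { &fresh[0], &fresh[1] };
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		ccupdate_local(new_tab, cpu);	/* the kernel does this via IPI */

	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu %d: old cache %p returned for draining\n",
		       cpu, (void *)new_tab[cpu]);	/* NULL: none existed */
	return 0;
}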
static void enable_cpucache (kmem_cache_t *cachep)
{
int err;
int limit;
/* FIXME: optimize - choose the per-CPU limit from the object size */
if (cachep->objsize > PAGE_SIZE)
return;
if (cachep->objsize > 1024)
limit = 60;
else if (cachep->objsize > 256)
limit = 124;
else
limit = 252;
err = kmem_tune_cpucache(cachep, limit, limit/2);
if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
cachep->name, -err);
}
/* Enable the per-CPU caches of every cache on the chain. */
static void enable_all_cpucaches (void)
{
struct list_head* p;
down(&cache_chain_sem); /* take the cache-chain semaphore */
p = &cache_cache.next;
do {
kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
enable_cpucache(cachep); /* enable them one by one */
p = cachep->next.next;
} while (p != &cache_cache.next);
up(&cache_chain_sem); /* release the cache-chain semaphore */
}
#endif
/**
* kmem_cache_reap - Reclaim memory from caches.
* @gfp_mask: the type of memory required.
*
* Called from do_try_to_free_pages() and __alloc_pages()
 */
int kmem_cache_reap (int gfp_mask)
{
slab_t *slabp;
kmem_cache_t *searchp;
kmem_cache_t *best_cachep;
unsigned int best_pages;
unsigned int best_len;
unsigned int scan;
int ret = 0;
if (gfp_mask & __GFP_WAIT) /* the caller may sleep: block on the chain semaphore */
down(&cache_chain_sem);
else
if (down_trylock(&cache_chain_sem)) /* may not sleep: try once, give up on contention */
return 0;
scan = REAP_SCANLEN; /* examine at most REAP_SCANLEN (10) caches per call */
best_len = 0;
best_pages = 0;
best_cachep = NULL;
searchp = clock_searchp; /* resume from where the last reap left off */
do {
unsigned int pages;
struct list_head* p;
unsigned int full_free;
/* It's safe to test this without holding the cache-lock. */
if (searchp->flags & SLAB_NO_REAP) /* this cache may not be reaped: skip it */
goto next;
spin_lock_irq(&searchp->spinlock); /* lock */
if (searchp->growing) /* currently growing: skip it (unlocking first) */
goto next_unlock;
if (searchp->dflags & DFLGS_GROWN) {
searchp->dflags &= ~DFLGS_GROWN; /* grew recently: clear the flag and grant a grace period */
goto next_unlock;
}
#ifdef CONFIG_SMP /* SMP: drain this CPU's local cache back into the slab lists first */
{
cpucache_t *cc = cc_data(searchp);
if (cc && cc->avail) { /* it holds cached objects: free them */
__free_block(searchp, cc_entry(cc), cc->avail);
cc->avail = 0;
}
}
#endif
full_free = 0;
p = searchp->slabs_free.next; /* first entry on this cache's free-slab list */
while (p != &searchp->slabs_free) {
slabp = list_entry(p, slab_t, list);
#if DEBUG /* a slab on the free list must not contain live objects */
if (slabp->inuse)
BUG();
#endif
full_free++;
p = p->next;
} /* full_free now holds the number of completely free slabs */
/*
* Try to avoid slabs with constructors and/or
* more than one page per slab (as it can be difficult
* to get high orders from gfp()).
*/
pages = full_free * (1<<searchp->gfporder); /* pages held by all free slabs of this cache */
if (searchp->ctor) /* objects have a constructor: weight the score down */
pages = (pages*4+1)/5;
if (searchp->gfporder) /* slabs span more than one page: weight it down again */
pages = (pages*4+1)/5;
if (pages > best_pages) { /* best candidate so far: remember it */
best_cachep = searchp;
best_len = full_free;
best_pages = pages;
if (pages >= REAP_PERFECT) { /* good enough (REAP_PERFECT is 10): advance the clock hand and reap it immediately */
clock_searchp = list_entry(searchp->next.next,
kmem_cache_t,next);
goto perfect;
}
}
next_unlock:
spin_unlock_irq(&searchp->spinlock); /* unlock */
next:
searchp = list_entry(searchp->next.next,kmem_cache_t,next); /* advance to the next cache on the chain */
} while (--scan && searchp != clock_searchp); /* stop when the scan budget is spent or we are back at the start */
clock_searchp = searchp;
if (!best_cachep) /* nothing reapable was found */
/* couldn't find anything to reap */
goto out;
spin_lock_irq(&best_cachep->spinlock); /* lock the chosen victim */
perfect:
/* free only 50% of the free slabs */
best_len = (best_len + 1)/2; /* release only half of the free slabs */
for (scan = 0; scan < best_len; scan++) {
struct list_head *p;
if (best_cachep->growing) /* the victim started growing: stop reaping */
break;
p = best_cachep->slabs_free.prev;
if (p == &best_cachep->slabs_free) /* back at the list head: no free slabs left */
break;
slabp = list_entry(p,slab_t,list);
#if DEBUG /* a free slab must not hold live objects */
if (slabp->inuse)
BUG();
#endif
list_del(&slabp->list); /* unlink the slab from the free list */
STATS_INC_REAPED(best_cachep); /* statistics: one more slab reaped */
/* Safe to drop the lock. The slab is no longer linked to the
 * cache.
 */
spin_unlock_irq(&best_cachep->spinlock); /* unlock */
kmem_slab_destroy(best_cachep, slabp); /* destroy the slab and return its memory to the system */
spin_lock_irq(&best_cachep->spinlock); /* relock for the next pass */
}
spin_unlock_irq(&best_cachep->spinlock); /* final unlock */
ret = scan * (1 << best_cachep->gfporder); /* pages freed = slabs destroyed * pages per slab */
out:
up(&cache_chain_sem); /* release the cache-chain semaphore */
return ret;
}
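The victim-selection heuristic above deliberately penalizes caches that are expensive to repopulate. A standalone illustration of the weighting with assumed numbers:

#include <stdio.h>

int main(void)
{
	unsigned int full_free = 6;	/* free slabs found on the list */
	unsigned int gfporder = 1;	/* 2 pages per slab */
	unsigned int pages = full_free * (1 << gfporder);	/* 12 pages */

	pages = (pages * 4 + 1) / 5;	/* has a ctor: weight down  -> 9 */
	pages = (pages * 4 + 1) / 5;	/* multi-page slabs: again  -> 7 */
	printf("weighted page score: %u\n", pages);
	return 0;
}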
#ifdef CONFIG_PROC_FS /* everything below implements /proc/slabinfo */
static void *s_start(struct seq_file *m, loff_t *pos)
{
loff_t n = *pos;
struct list_head *p;
down(&cache_chain_sem);
if (!n)
return (void *)1;
p = &cache_cache.next;
while (--n) {
p = p->next;
if (p == &cache_cache.next)
return NULL;
}
return list_entry(p, kmem_cache_t, next);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
kmem_cache_t *cachep = p;
++*pos;
if (p == (void *)1)
return &cache_cache;
cachep = list_entry(cachep->next.next, kmem_cache_t, next);
return cachep == &cache_cache ? NULL : cachep;
}
static void s_stop(struct seq_file *m, void *p)
{
up(&cache_chain_sem);
}
static int s_show(struct seq_file *m, void *p)
{
kmem_cache_t *cachep = p;
struct list_head *q;
slab_t *slabp;
unsigned long active_objs;
unsigned long num_objs;
unsigned long active_slabs = 0;
unsigned long num_slabs;
const char *name;
if (p == (void*)1) {
/*
* Output format version, so at least we can change it
* without _too_ many complaints.
*/
seq_puts(m, "slabinfo - version: 1.1"
#if STATS
" (statistics)"
#endif
#ifdef CONFIG_SMP
" (SMP)"
#endif
"\n");
return 0;
}
spin_lock_irq(&cachep->spinlock);
active_objs = 0;
num_slabs = 0;
list_for_each(q,&cachep->slabs_full) {
slabp = list_entry(q, slab_t, list);
if (slabp->inuse != cachep->num)
BUG();
active_objs += cachep->num;
active_slabs++;
}
list_for_each(q,&cachep->slabs_partial) {
slabp = list_entry(q, slab_t, list);
if (slabp->inuse == cachep->num || !slabp->inuse)
BUG();
active_objs += slabp->inuse;
active_slabs++;
}
list_for_each(q,&cachep->slabs_free) {
slabp = list_entry(q, slab_t, list);
if (slabp->inuse)
BUG();
num_slabs++;
}
num_slabs+=active_slabs;
num_objs = num_slabs*cachep->num;
name = cachep->name;
{
char tmp;
mm_segment_t old_fs;
old_fs = get_fs();
set_fs(KERNEL_DS);
if (__get_user(tmp, name))
name = "broken";
set_fs(old_fs);
}
seq_printf(m, "%-17s %6lu %6lu %6u %4lu %4lu %4u",
name, active_objs, num_objs, cachep->objsize,
active_slabs, num_slabs, (1 << cachep->gfporder));
#if STATS
{
unsigned long errors = cachep->errors;
unsigned long high = cachep->high_mark;
unsigned long grown = cachep->grown;
unsigned long reaped = cachep->reaped;
unsigned long allocs = cachep->num_allocations;
seq_printf(m, " : %6lu %7lu %5lu %4lu %4lu",
high, allocs, grown, reaped, errors);
}
#endif
#ifdef CONFIG_SMP
{
cpucache_t *cc = cc_data(cachep);
unsigned int batchcount = cachep->batchcount;
unsigned int limit;
if (cc)
limit = cc->limit;
else
limit = 0;
seq_printf(m, " : %4u %4u",
limit, batchcount);
}
#endif
#if STATS && defined(CONFIG_SMP)
{
unsigned long allochit = atomic_read(&cachep->allochit);
unsigned long allocmiss = atomic_read(&cachep->allocmiss);
unsigned long freehit = atomic_read(&cachep->freehit);
unsigned long freemiss = atomic_read(&cachep->freemiss);
seq_printf(m, " : %6lu %6lu %6lu %6lu",
allochit, allocmiss, freehit, freemiss);
}
#endif
spin_unlock_irq(&cachep->spinlock);
seq_putc(m, '\n');
return 0;
}
/**
* slabinfo_op - iterator that generates /proc/slabinfo
*
* Output layout:
* cache-name
* num-active-objs
* total-objs
* object size
* num-active-slabs
* total-slabs
* num-pages-per-slab
* + further values on SMP and with statistics enabled
*/
struct seq_operations slabinfo_op = {
start: s_start,
next: s_next,
stop: s_stop,
show: s_show
};
#define MAX_SLABINFO_WRITE 128
/**
* slabinfo_write - SMP tuning for the slab allocator
* @file: unused
* @buffer: user buffer
* @count: data len
* @data: unused
*/
ssize_t slabinfo_write(struct file *file, const char *buffer,
size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
int limit, batchcount, res;
struct list_head *p;
if (count > MAX_SLABINFO_WRITE)
return -EINVAL;
if (copy_from_user(&kbuf, buffer, count))
return -EFAULT;
kbuf[MAX_SLABINFO_WRITE] = '\0';
tmp = strchr(kbuf, ' ');
if (!tmp)
return -EINVAL;
*tmp = '\0';
tmp++;
limit = simple_strtol(tmp, &tmp, 10);
while (*tmp == ' ')
tmp++;
batchcount = simple_strtol(tmp, &tmp, 10);
/* Find the cache in the chain of caches. */
down(&cache_chain_sem);
res = -EINVAL;
list_for_each(p,&cache_chain) {
kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
if (!strcmp(cachep->name, kbuf)) {
res = kmem_tune_cpucache(cachep, limit, batchcount);
break;
}
}
up(&cache_chain_sem);
if (res >= 0)
res = count;
return res;
#else
return -EINVAL;
#endif
}
#endif
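For completeness, a hedged user-space sketch of driving slabinfo_write(): write "cache-name limit batchcount" to /proc/slabinfo in the format parsed above (root required; the cache name and numbers are example values, not recommendations):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cmd = "size-4096 120 60\n";	/* name, limit, batchcount */
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fwrite(cmd, 1, strlen(cmd), f) != strlen(cmd))
		perror("fwrite");
	fclose(f);	/* flushes the buffered write to the proc file */
	return 0;
}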