Mirror of https://github.com/torvalds/linux (synced 2024-11-05 18:23:50 +00:00)
mm/slab_common: move generic bulk alloc/free functions to SLOB
Now that only SLOB uses __kmem_cache_{alloc,free}_bulk(), move them to
SLOB. No functional change intended.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
commit 3041808b52 (parent 2055e67bb6)
3 changed files with 21 additions and 40 deletions
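For readers unfamiliar with the interface being moved, here is a minimal caller-side sketch of the bulk API whose generic fallback this commit relocates into SLOB. The cache obj_cache, struct my_obj, and use_bulk_api() are hypothetical names for illustration only, not part of the commit:

#include <linux/slab.h>

struct my_obj {
	int val;
};

/* Hypothetical cache, created elsewhere via kmem_cache_create(). */
static struct kmem_cache *obj_cache;

static int use_bulk_api(void)
{
	void *objs[16];

	/*
	 * kmem_cache_alloc_bulk() either fills every slot and returns the
	 * count, or frees any partial allocation itself and returns 0, so
	 * the caller has nothing to clean up on failure.
	 */
	if (!kmem_cache_alloc_bulk(obj_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return -ENOMEM;

	/* ... use the objects ... */

	kmem_cache_free_bulk(obj_cache, ARRAY_SIZE(objs), objs);
	return 0;
}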
mm/slab.h

@@ -380,15 +380,6 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
 
-/*
- * Generic implementation of bulk operations
- * These are useful for situations in which the allocator cannot
- * perform optimizations. In that case segments of the object listed
- * may be allocated or freed using these operations.
- */
-void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
-
 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
 {
 	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
mm/slab_common.c

@@ -104,33 +104,6 @@ static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
 }
 #endif
 
-void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
-{
-	size_t i;
-
-	for (i = 0; i < nr; i++) {
-		if (s)
-			kmem_cache_free(s, p[i]);
-		else
-			kfree(p[i]);
-	}
-}
-
-int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
-			    void **p)
-{
-	size_t i;
-
-	for (i = 0; i < nr; i++) {
-		void *x = p[i] = kmem_cache_alloc(s, flags);
-		if (!x) {
-			__kmem_cache_free_bulk(s, i, p);
-			return 0;
-		}
-	}
-	return i;
-}
-
 /*
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
mm/slob.c (25 lines changed: 21 additions, 4 deletions)
@@ -692,16 +692,33 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 {
-	__kmem_cache_free_bulk(s, size, p);
+	size_t i;
+
+	for (i = 0; i < nr; i++) {
+		if (s)
+			kmem_cache_free(s, p[i]);
+		else
+			kfree(p[i]);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 			  void **p)
 {
-	return __kmem_cache_alloc_bulk(s, flags, size, p);
+	size_t i;
+
+	for (i = 0; i < nr; i++) {
+		void *x = p[i] = kmem_cache_alloc(s, flags);
+
+		if (!x) {
+			kmem_cache_free_bulk(s, i, p);
+			return 0;
+		}
+	}
+	return i;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
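Note that the copies moved into mm/slob.c preserve the generic fallback's contract exactly: kmem_cache_free_bulk() still accepts a NULL cache and falls back to kfree() for each object, and kmem_cache_alloc_bulk() still frees any partially allocated batch and returns 0 on failure, returning nr on success. That is what makes this a pure code relocation with no functional change.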