mm: rework memcg kernel stack accounting
If CONFIG_VMAP_STACK is set, kernel stacks are allocated using
__vmalloc_node_range() with __GFP_ACCOUNT, so kernel stack pages are
charged against the corresponding memory cgroup on allocation and
uncharged when they are released.

The problem is that we cache kernel stacks in small per-cpu caches and
reuse them for new tasks, which can belong to different memory cgroups.

Each stack page still holds a reference to the original cgroup, so the
cgroup can't be released until the vmap area is released.

For that to happen we need more than two subsequent task exits without
forks in between on the current cpu, which makes it very unlikely in
practice.  As a result, I saw a significant number of dying cgroups (in
theory, up to 2 * number_of_cpus + number_of_tasks) that can't be
released even by significant memory pressure.

Because a cgroup structure can take a significant amount of memory
(first of all, per-cpu data such as memcg statistics), this leads to a
noticeable waste of memory.

Link: http://lkml.kernel.org/r/[email protected]
Fixes: ac496bf ("fork: Optimize task creation by caching two thread stacks per CPU if CONFIG_VMAP_STACK=y")
Signed-off-by: Roman Gushchin <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Tejun Heo <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
rgushchin authored and torvalds committed Oct 26, 2018
1 parent c5fd3ca commit 9b6f7e1
Showing 2 changed files with 61 additions and 7 deletions.
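
For readers who don't have the stack-caching code in mind, here is a user-space sketch of the pre-patch behaviour the commit message describes. It is a deliberately simplified model, not kernel code: the struct names, the refcount, and the two-slot cache are illustrative stand-ins for the real memcg charge and the per-cpu cached_stacks array. The point is that a stack charged while a task in cgroup A was forking can later be handed to a task in cgroup B, and cgroup A stays pinned.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative model of the pre-patch situation (hypothetical types). */
struct cgroup { const char *name; int refcnt; };

struct stack {
	struct cgroup *charged_to;	/* set at allocation time, never updated */
};

#define NR_CACHED_STACKS 2
static struct stack *cached_stacks[NR_CACHED_STACKS];

static struct stack *alloc_stack(struct cgroup *cg)
{
	for (int i = 0; i < NR_CACHED_STACKS; i++) {
		if (cached_stacks[i]) {
			struct stack *s = cached_stacks[i];
			cached_stacks[i] = NULL;
			return s;	/* reused: still charged to the old cgroup */
		}
	}
	struct stack *s = malloc(sizeof(*s));
	s->charged_to = cg;
	cg->refcnt++;			/* the charge pins the cgroup */
	return s;
}

static void free_stack(struct stack *s)
{
	for (int i = 0; i < NR_CACHED_STACKS; i++) {
		if (!cached_stacks[i]) {
			cached_stacks[i] = s;	/* cached without uncharging */
			return;
		}
	}
	s->charged_to->refcnt--;
	free(s);
}

int main(void)
{
	struct cgroup a = { "A", 0 }, b = { "B", 0 };

	struct stack *s = alloc_stack(&a);	/* task in cgroup A forks */
	free_stack(s);				/* task exits, stack is cached */
	s = alloc_stack(&b);			/* task in cgroup B reuses it */

	/* Cgroup A is still pinned although no task of A is running. */
	printf("stack charged to %s, refcnt A=%d B=%d\n",
	       s->charged_to->name, a.refcnt, b.refcnt);
	return 0;
}

Compiled with any C compiler, this prints that the reused stack is still charged to A even though only B is using it, which is the leak of dying cgroups the patch removes.
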
13 changes: 12 additions & 1 deletion include/linux/memcontrol.h
@@ -1268,10 +1268,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
 			    struct mem_cgroup *memcg);
-int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void memcg_kmem_uncharge(struct page *page, int order);
 
 #ifdef CONFIG_MEMCG_KMEM
+int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
+void memcg_kmem_uncharge(struct page *page, int order);
+
 extern struct static_key_false memcg_kmem_enabled_key;
 extern struct workqueue_struct *memcg_kmem_cache_wq;
 
@@ -1307,6 +1308,16 @@ extern int memcg_expand_shrinker_maps(int new_id);
 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
 				   int nid, int shrinker_id);
 #else
+
+static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+{
+	return 0;
+}
+
+static inline void memcg_kmem_uncharge(struct page *page, int order)
+{
+}
+
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
 
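
The new !CONFIG_MEMCG_KMEM branch follows the usual kernel pattern of providing static inline no-op stubs, so that callers (here, kernel/fork.c below) can invoke memcg_kmem_charge()/memcg_kmem_uncharge() unconditionally and the calls vanish when kmem accounting is compiled out. A minimal stand-alone illustration of that pattern, using hypothetical names rather than the real config option:

#include <stdio.h>

/* Toggle this to mimic the accounting option being enabled or disabled. */
#define CONFIG_FOO_ACCOUNTING 0

#if CONFIG_FOO_ACCOUNTING
static int foo_charge(int nr_pages)
{
	printf("charging %d page(s)\n", nr_pages);
	return 0;
}
static void foo_uncharge(int nr_pages)
{
	printf("uncharging %d page(s)\n", nr_pages);
}
#else
/* Stubs: callers stay free of #ifdefs, and the compiler drops the calls. */
static inline int foo_charge(int nr_pages) { return 0; }
static inline void foo_uncharge(int nr_pages) { }
#endif

int main(void)
{
	if (foo_charge(4))	/* always compiles; a no-op when disabled */
		return 1;
	foo_uncharge(4);
	return 0;
}
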
55 changes: 49 additions & 6 deletions kernel/fork.c
@@ -223,9 +223,14 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 		return s->addr;
 	}
 
+	/*
+	 * Allocated stacks are cached and later reused by new threads,
+	 * so memcg accounting is performed manually on assigning/releasing
+	 * stacks to tasks. Drop __GFP_ACCOUNT.
+	 */
 	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
 				     VMALLOC_START, VMALLOC_END,
-				     THREADINFO_GFP,
+				     THREADINFO_GFP & ~__GFP_ACCOUNT,
 				     PAGE_KERNEL,
 				     0, node, __builtin_return_address(0));
 
@@ -248,9 +253,19 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 static inline void free_thread_stack(struct task_struct *tsk)
 {
 #ifdef CONFIG_VMAP_STACK
-	if (task_stack_vm_area(tsk)) {
+	struct vm_struct *vm = task_stack_vm_area(tsk);
+
+	if (vm) {
 		int i;
 
+		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+			mod_memcg_page_state(vm->pages[i],
+					     MEMCG_KERNEL_STACK_KB,
+					     -(int)(PAGE_SIZE / 1024));
+
+			memcg_kmem_uncharge(vm->pages[i], 0);
+		}
+
 		for (i = 0; i < NR_CACHED_STACKS; i++) {
 			if (this_cpu_cmpxchg(cached_stacks[i],
 					NULL, tsk->stack_vm_area) != NULL)
@@ -351,10 +366,6 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
 					    NR_KERNEL_STACK_KB,
 					    PAGE_SIZE / 1024 * account);
 		}
-
-		/* All stack pages belong to the same memcg. */
-		mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
-				     account * (THREAD_SIZE / 1024));
 	} else {
 		/*
 		 * All stack pages are in the same zone and belong to the
@@ -370,6 +381,35 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
 	}
 }
 
+static int memcg_charge_kernel_stack(struct task_struct *tsk)
+{
+#ifdef CONFIG_VMAP_STACK
+	struct vm_struct *vm = task_stack_vm_area(tsk);
+	int ret;
+
+	if (vm) {
+		int i;
+
+		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+			/*
+			 * If memcg_kmem_charge() fails, page->mem_cgroup
+			 * pointer is NULL, and both memcg_kmem_uncharge()
+			 * and mod_memcg_page_state() in free_thread_stack()
+			 * will ignore this page. So it's safe.
+			 */
+			ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
+			if (ret)
+				return ret;
+
+			mod_memcg_page_state(vm->pages[i],
+					     MEMCG_KERNEL_STACK_KB,
+					     PAGE_SIZE / 1024);
+		}
+	}
+#endif
+	return 0;
+}
+
 static void release_task_stack(struct task_struct *tsk)
 {
 	if (WARN_ON(tsk->state != TASK_DEAD))
@@ -807,6 +847,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (!stack)
 		goto free_tsk;
 
+	if (memcg_charge_kernel_stack(tsk))
+		goto free_stack;
+
 	stack_vm_area = task_stack_vm_area(tsk);
 
 	err = arch_dup_task_struct(tsk, orig);
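
A quick sanity check of the accounting arithmetic after the patch, assuming 4 KB pages and a 16 KB vmap'ed stack (typical x86_64 values; other configurations differ): the removed single mod_memcg_page_state() call added THREAD_SIZE / 1024 KB to MEMCG_KERNEL_STACK_KB on vm->pages[0], while the new code adds PAGE_SIZE / 1024 KB to each of the THREAD_SIZE / PAGE_SIZE pages. The total is unchanged, but every page now carries its own charge and can be uncharged in free_thread_stack() before the stack goes back into the per-cpu cache.

#include <assert.h>
#include <stdio.h>

/* Assumed example values (4 KB pages, 16 KB stack); real values come from
 * the architecture's THREAD_SIZE and PAGE_SIZE definitions. */
#define PAGE_SIZE   4096UL
#define THREAD_SIZE (4UL * PAGE_SIZE)

int main(void)
{
	unsigned long pages = THREAD_SIZE / PAGE_SIZE;		/* 4 per-page charges */
	unsigned long per_page_kb = PAGE_SIZE / 1024;		/* 4 KB each */
	unsigned long old_single_kb = THREAD_SIZE / 1024;	/* 16 KB in one update */

	/* The per-page updates sum to the same MEMCG_KERNEL_STACK_KB delta
	 * that the removed single mod_memcg_page_state() call applied. */
	assert(pages * per_page_kb == old_single_kb);
	printf("%lu pages x %lu KB = %lu KB\n", pages, per_page_kb, old_single_kb);
	return 0;
}
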
