Merge branch 'akpm' (patches from Andrew Morton)
Merge fixes from Andrew Morton:
 "Bunch of fixes.

  And a reversion of mhocko's "Soft limit rework" patch series.  This is
  actually your fault for opening the merge window when I was off racing ;)

  I didn't read the email thread before sending everything off.
  Johannes Weiner raised significant issues:

    http://www.spinics.net/lists/cgroups/msg08813.html

  and we agreed to back it all out"

I clearly need to be more aware of Andrew's racing schedule.

* akpm:
  MAINTAINERS: update mach-bcm related email address
  checkpatch: make extern in .h prototypes quieter
  cciss: fix info leak in cciss_ioctl32_passthru()
  cpqarray: fix info leak in ida_locked_ioctl()
  kernel/reboot.c: re-enable the function of variable reboot_default
  audit: fix endless wait in audit_log_start()
  revert "memcg, vmscan: integrate soft reclaim tighter with zone shrinking code"
  revert "memcg: get rid of soft-limit tree infrastructure"
  revert "vmscan, memcg: do softlimit reclaim also for targeted reclaim"
  revert "memcg: enhance memcg iterator to support predicates"
  revert "memcg: track children in soft limit excess to improve soft limit"
  revert "memcg, vmscan: do not attempt soft limit reclaim if it would not scan anything"
  revert "memcg: track all children over limit in the root"
  revert "memcg, vmscan: do not fall into reclaim-all pass too quickly"
  fs/ocfs2/super.c: use a bigger nodestr in ocfs2_dismount_volume
  watchdog: update watchdog_thresh properly
  watchdog: update watchdog attributes atomically
torvalds committed Sep 25, 2013
2 parents e288e93 + 497a045, commit a153e67
Showing 12 changed files with 527 additions and 262 deletions.
MAINTAINERS: 3 changes (2 additions, 1 deletion)

@@ -1812,7 +1812,8 @@ S: Supported
 F:      drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
-M:      Christian Daudt <[email protected]>
+M:      Christian Daudt <[email protected]>
+L:      [email protected]
 T:      git git://git.github.com/broadcom/bcm11351
 S:      Maintained
 F:      arch/arm/mach-bcm/
drivers/block/cciss.c: 1 change (1 addition, 0 deletions)

@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
         int err;
         u32 cp;
 
+        memset(&arg64, 0, sizeof(arg64));
         err = 0;
         err |=
             copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
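The added memset closes a kernel-to-user infoleak: arg64 is a stack structure that is only partially filled in before its contents are eventually copied back out to user space, so compiler-inserted padding and any unwritten members would otherwise hand stale kernel stack bytes to the caller. A minimal sketch of the bug class the memset prevents (struct resp, leaky_ioctl, and uarg are hypothetical names for illustration, not cciss code; kernel-style types assumed):

        /* Hypothetical illustration of the bug class, not cciss code. */
        struct resp {
                u8  status;             /* written below */
                /* 3 bytes of implicit padding -- never written */
                u32 value;              /* written below */
                u64 reserved;           /* never written */
        };

        static long leaky_ioctl(void __user *uarg)
        {
                struct resp r;          /* uninitialized stack memory */

                r.status = 0;
                r.value = 42;
                /* also copies the padding bytes and r.reserved out: */
                if (copy_to_user(uarg, &r, sizeof(r)))
                        return -EFAULT;
                return 0;
        }

Zeroing the whole structure up front, as the patch does with memset(&arg64, 0, sizeof(arg64)), guarantees that every byte later copied to user space has a defined value.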
drivers/block/cpqarray.c: 1 change (1 addition, 0 deletions)

@@ -1193,6 +1193,7 @@ static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned in
         ida_pci_info_struct pciinfo;
 
         if (!arg) return -EINVAL;
+        memset(&pciinfo, 0, sizeof(pciinfo));
         pciinfo.bus = host->pci_dev->bus->number;
         pciinfo.dev_fn = host->pci_dev->devfn;
         pciinfo.board_id = host->board_id;
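Same bug class as the cciss fix above: only some fields of pciinfo are assigned before the structure is copied out to the caller, so zeroing it first keeps struct padding and any unassigned members from leaking kernel stack contents.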
fs/ocfs2/super.c: 2 changes (1 addition, 1 deletion)

@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 {
         int tmp, hangup_needed = 0;
         struct ocfs2_super *osb = NULL;
-        char nodestr[8];
+        char nodestr[12];
 
         trace_ocfs2_dismount_volume(sb);
 
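nodestr holds the decimal node number that ocfs2 prints at unmount time. Eight bytes is too small for large node numbers: a 32-bit value can need up to ten digits plus the terminating NUL, and snprintf() silently truncates rather than overflowing. A small illustration (assuming the node number is an unsigned 32-bit value, which the new 12-byte buffer comfortably covers):

        char small[8], big[12];
        u32 node_num = 4294967295u;                     /* largest 32-bit value */

        snprintf(small, sizeof(small), "%u", node_num); /* "4294967" -- truncated */
        snprintf(big, sizeof(big), "%u", node_num);     /* "4294967295" -- intact */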
include/linux/memcontrol.h: 55 changes (10 additions, 45 deletions)

@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
         unsigned int generation;
 };
 
-enum mem_cgroup_filter_t {
-        VISIT,          /* visit current node */
-        SKIP,           /* skip the current node and continue traversal */
-        SKIP_TREE,      /* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
 #ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
         struct page *oldpage, struct page *newpage, bool migration_ok);
 
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
-                                   struct mem_cgroup *prev,
-                                   struct mem_cgroup_reclaim_cookie *reclaim,
-                                   mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
-                                   struct mem_cgroup *prev,
-                                   struct mem_cgroup_reclaim_cookie *reclaim)
-{
-        return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+                                   struct mem_cgroup *,
+                                   struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
         mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-                struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                            gfp_t gfp_mask,
+                                            unsigned long *total_scanned);
 
 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                 struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
-                struct mem_cgroup *prev,
-                struct mem_cgroup_reclaim_cookie *reclaim,
-                mem_cgroup_iter_filter cond)
-{
-        /* first call must return non-NULL, second return NULL */
-        return (struct mem_cgroup *)(unsigned long)!prev;
-}
 
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-                struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                            gfp_t gfp_mask,
+                                            unsigned long *total_scanned)
 {
-        return VISIT;
+        return 0;
 }
 
 static inline void mem_cgroup_split_huge_fixup(struct page *head)
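With the predicate-based iterator (mem_cgroup_iter_cond() plus mem_cgroup_iter_filter) reverted, callers are back to the plain hierarchy walk: mem_cgroup_iter() returns each memcg under root in turn and NULL at the end of a round, and mem_cgroup_iter_break() drops the reference on early exit. A sketch of the canonical loop, following the iterator's kerneldoc in mm/memcontrol.c (usage illustration, not code from this patch):

        struct mem_cgroup *iter;

        for (iter = mem_cgroup_iter(root, NULL, NULL);
             iter != NULL;
             iter = mem_cgroup_iter(root, iter, NULL)) {
                /* work on iter; to stop early, call
                 * mem_cgroup_iter_break(root, iter) and break */
        }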
include/linux/smp.h: 6 changes (6 additions, 0 deletions)

@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 
 static inline void kick_all_cpus_sync(void) { }
 
+static inline void __smp_call_function_single(int cpuid,
+                struct call_single_data *data, int wait)
+{
+        on_each_cpu(data->func, data->info, wait);
+}
+
 #endif /* !SMP */
 
 /*
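The new !SMP stub lets callers, such as the watchdog change below, use __smp_call_function_single() without an #ifdef: on a uniprocessor build there is no remote CPU to interrupt, so the cpuid argument is effectively ignored and on_each_cpu() simply runs the callback on the one local CPU with interrupts disabled. A hypothetical UP-build caller (my_func and my_info are illustrative names):

        struct call_single_data csd = { .func = my_func, .info = &my_info };

        __smp_call_function_single(0, &csd, 1); /* runs my_func(&my_info) locally */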
kernel/audit.c: 5 changes (3 additions, 2 deletions)

@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
                         sleep_time = timeout_start + audit_backlog_wait_time -
                                         jiffies;
-                        if ((long)sleep_time > 0)
+                        if ((long)sleep_time > 0) {
                                 wait_for_auditd(sleep_time);
-                        continue;
+                                continue;
+                        }
                 }
                 if (audit_rate_check() && printk_ratelimit())
                         printk(KERN_WARNING
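The endless wait was a classic unbraced-if bug: continue was indented as though it were guarded by the condition, but it executed unconditionally, so once sleep_time went non-positive the loop restarted forever instead of falling through to the rate-limited warning. Reduced to its essence (both fragments taken from the hunk above):

        /* Before (buggy): the if guards only the first statement. */
        if ((long)sleep_time > 0)
                wait_for_auditd(sleep_time);
        continue;                       /* always runs: the loop never exits */

        /* After: both statements are guarded, so an expired timeout
         * falls through and the caller gets an error instead of hanging. */
        if ((long)sleep_time > 0) {
                wait_for_auditd(sleep_time);
                continue;
        }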
kernel/reboot.c: 9 changes (8 additions, 1 deletion)

@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line). This is needed so that we can
+ * suppress DMI scanning for reboot quirks. Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
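The '= 1' initializer had been lost when this code was consolidated into kernel/reboot.c, so reboot_default always read as "the user passed reboot=" and the DMI reboot-quirk table was never consulted. A simplified sketch of the intended consumer, modeled on the x86 reboot code (the function and table names here are assumptions for illustration, not part of this patch):

        /* Sketch only: the real check lives in arch/x86/kernel/reboot.c. */
        static int __init reboot_init(void)
        {
                /* Scan DMI quirks only if no reboot= option was given. */
                if (reboot_default)
                        dmi_check_system(reboot_dmi_table);
                return 0;
        }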
kernel/watchdog.c: 60 changes (55 additions, 5 deletions)

@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
         .unpark         = watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+        int ret;
+
+        /*
+         * No need to cancel and restart hrtimer if it is currently executing
+         * because it will reprogram itself with the new period now.
+         * We should never see it unqueued here because we are running per-cpu
+         * with interrupts disabled.
+         */
+        ret = hrtimer_try_to_cancel(hrtimer);
+        if (ret == 1)
+                hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+                                HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+        struct call_single_data data = {.func = restart_watchdog_hrtimer};
+        /*
+         * Make sure that perf event counter will adopt to a new
+         * sampling period. Updating the sampling period directly would
+         * be much nicer but we do not have an API for that now so
+         * let's use a big hammer.
+         * Hrtimer will adopt the new period on the next tick but this
+         * might be late already so we have to restart the timer as well.
+         */
+        watchdog_nmi_disable(cpu);
+        __smp_call_function_single(cpu, &data, 1);
+        watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+        int cpu;
+
+        get_online_cpus();
+        preempt_disable();
+        for_each_online_cpu(cpu)
+                update_timers(cpu);
+        preempt_enable();
+        put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
         int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
                         pr_err("Failed to create watchdog threads, disabled\n");
                 else
                         watchdog_running = 1;
+        } else if (sample_period_changed) {
+                update_timers_all_cpus();
         }
 
         return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
         int err, old_thresh, old_enabled;
+        static DEFINE_MUTEX(watchdog_proc_mutex);
 
+        mutex_lock(&watchdog_proc_mutex);
         old_thresh = ACCESS_ONCE(watchdog_thresh);
         old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
         err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
         if (err || !write)
-                return err;
+                goto out;
 
         set_sample_period();
         /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
          * watchdog_*_all_cpus() function takes care of this.
          */
         if (watchdog_user_enabled && watchdog_thresh)
-                err = watchdog_enable_all_cpus();
+                err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
         else
                 watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                 watchdog_thresh = old_thresh;
                 watchdog_user_enabled = old_enabled;
         }
-
+out:
+        mutex_unlock(&watchdog_proc_mutex);
         return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
         set_sample_period();
 
         if (watchdog_user_enabled)
-                watchdog_enable_all_cpus();
+                watchdog_enable_all_cpus(false);
 }
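Taken together, the watchdog changes make a runtime update of watchdog_thresh take effect immediately instead of waiting for a disable/enable cycle, and serialize concurrent sysctl writers behind the new watchdog_proc_mutex. The resulting flow when the threshold is rewritten while the watchdog is already running, traced from the hunks above:

        /*
         * proc_dowatchdog()                          (sysctl write path)
         *   -> set_sample_period()
         *   -> watchdog_enable_all_cpus(old_thresh != watchdog_thresh)
         *        -> update_timers_all_cpus()         (already running, period changed)
         *             -> update_timers(cpu)          (for each online CPU)
         *                  -> watchdog_nmi_disable(cpu)
         *                  -> __smp_call_function_single(cpu, &data, 1)
         *                       -> restart_watchdog_hrtimer()
         *                  -> watchdog_nmi_enable(cpu)
         */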
(Diffs for the remaining 3 changed files did not load and are not shown.)
