Skip to content

Commit

Permalink
mm/page_alloc: integrate classzone_idx and high_zoneidx
Browse files Browse the repository at this point in the history
classzone_idx is now just a different name for high_zoneidx.  So, integrate
them and add some comment to struct alloc_context in order to reduce
future confusion about the meaning of this variable.

The accessor, ac_classzone_idx() is also removed since it isn't needed
after integration.

In addition to the integration, this patch also renames high_zoneidx to
highest_zoneidx, since the new name conveys its meaning more precisely.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Baoquan He <bhe@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Ye Xiaolong <xiaolong.ye@intel.com>
Link: http://lkml.kernel.org/r/1587095923-7515-3-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
JoonsooKim authored and torvalds committed Jun 4, 2020
1 parent 3334a45 commit 97a225e
Show file tree
Hide file tree
Showing 12 changed files with 175 additions and 150 deletions.
9 changes: 5 additions & 4 deletions include/linux/compaction.h
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
unsigned int alloc_flags, int classzone_idx);
unsigned int alloc_flags, int highest_zoneidx);

extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
Expand Down Expand Up @@ -182,15 +182,15 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,

extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);

#else
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline enum compact_result compaction_suitable(struct zone *zone, int order,
int alloc_flags, int classzone_idx)
int alloc_flags, int highest_zoneidx)
{
return COMPACT_SKIPPED;
}
Expand Down Expand Up @@ -232,7 +232,8 @@ static inline void kcompactd_stop(int nid)
{
}

static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
static inline void wakeup_kcompactd(pg_data_t *pgdat,
int order, int highest_zoneidx)
{
}

Expand Down
12 changes: 6 additions & 6 deletions include/linux/mmzone.h
Original file line number Diff line number Diff line change
Expand Up @@ -699,13 +699,13 @@ typedef struct pglist_data {
struct task_struct *kswapd; /* Protected by
mem_hotplug_begin/end() */
int kswapd_order;
enum zone_type kswapd_classzone_idx;
enum zone_type kswapd_highest_zoneidx;

int kswapd_failures; /* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
#endif
Expand Down Expand Up @@ -783,15 +783,15 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
enum zone_type classzone_idx);
enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int classzone_idx, unsigned int alloc_flags,
int highest_zoneidx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx,
unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx);
unsigned long mark, int highest_zoneidx);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
Expand Down
22 changes: 13 additions & 9 deletions include/trace/events/compaction.h
Original file line number Diff line number Diff line change
Expand Up @@ -314,40 +314,44 @@ TRACE_EVENT(mm_compaction_kcompactd_sleep,

DECLARE_EVENT_CLASS(kcompactd_wake_template,

TP_PROTO(int nid, int order, enum zone_type classzone_idx),
TP_PROTO(int nid, int order, enum zone_type highest_zoneidx),

TP_ARGS(nid, order, classzone_idx),
TP_ARGS(nid, order, highest_zoneidx),

TP_STRUCT__entry(
__field(int, nid)
__field(int, order)
__field(enum zone_type, classzone_idx)
__field(enum zone_type, highest_zoneidx)
),

TP_fast_assign(
__entry->nid = nid;
__entry->order = order;
__entry->classzone_idx = classzone_idx;
__entry->highest_zoneidx = highest_zoneidx;
),

/*
* classzone_idx is previous name of the highest_zoneidx.
* Reason not to change it is the ABI requirement of the tracepoint.
*/
TP_printk("nid=%d order=%d classzone_idx=%-8s",
__entry->nid,
__entry->order,
__print_symbolic(__entry->classzone_idx, ZONE_TYPE))
__print_symbolic(__entry->highest_zoneidx, ZONE_TYPE))
);

DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd,

TP_PROTO(int nid, int order, enum zone_type classzone_idx),
TP_PROTO(int nid, int order, enum zone_type highest_zoneidx),

TP_ARGS(nid, order, classzone_idx)
TP_ARGS(nid, order, highest_zoneidx)
);

DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake,

TP_PROTO(int nid, int order, enum zone_type classzone_idx),
TP_PROTO(int nid, int order, enum zone_type highest_zoneidx),

TP_ARGS(nid, order, classzone_idx)
TP_ARGS(nid, order, highest_zoneidx)
);
#endif

Expand Down
14 changes: 9 additions & 5 deletions include/trace/events/vmscan.h
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,7 @@ TRACE_EVENT(mm_shrink_slab_end,
);

TRACE_EVENT(mm_vmscan_lru_isolate,
TP_PROTO(int classzone_idx,
TP_PROTO(int highest_zoneidx,
int order,
unsigned long nr_requested,
unsigned long nr_scanned,
Expand All @@ -274,10 +274,10 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
isolate_mode_t isolate_mode,
int lru),

TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru),
TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru),

TP_STRUCT__entry(
__field(int, classzone_idx)
__field(int, highest_zoneidx)
__field(int, order)
__field(unsigned long, nr_requested)
__field(unsigned long, nr_scanned)
Expand All @@ -288,7 +288,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
),

TP_fast_assign(
__entry->classzone_idx = classzone_idx;
__entry->highest_zoneidx = highest_zoneidx;
__entry->order = order;
__entry->nr_requested = nr_requested;
__entry->nr_scanned = nr_scanned;
Expand All @@ -298,9 +298,13 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__entry->lru = lru;
),

/*
* classzone is previous name of the highest_zoneidx.
* Reason not to change it is the ABI requirement of the tracepoint.
*/
TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s",
__entry->isolate_mode,
__entry->classzone_idx,
__entry->highest_zoneidx,
__entry->order,
__entry->nr_requested,
__entry->nr_scanned,
Expand Down
64 changes: 32 additions & 32 deletions mm/compaction.c
Original file line number Diff line number Diff line change
Expand Up @@ -1968,7 +1968,7 @@ static enum compact_result compact_finished(struct compact_control *cc)
*/
static enum compact_result __compaction_suitable(struct zone *zone, int order,
unsigned int alloc_flags,
int classzone_idx,
int highest_zoneidx,
unsigned long wmark_target)
{
unsigned long watermark;
Expand All @@ -1981,7 +1981,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
* If watermarks for high-order allocation are already met, there
* should be no need for compaction at all.
*/
if (zone_watermark_ok(zone, order, watermark, classzone_idx,
if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
alloc_flags))
return COMPACT_SUCCESS;

Expand All @@ -1991,9 +1991,9 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
* watermark and alloc_flags have to match, or be more pessimistic than
* the check in __isolate_free_page(). We don't use the direct
* compactor's alloc_flags, as they are not relevant for freepage
* isolation. We however do use the direct compactor's classzone_idx to
* skip over zones where lowmem reserves would prevent allocation even
* if compaction succeeds.
* isolation. We however do use the direct compactor's highest_zoneidx
* to skip over zones where lowmem reserves would prevent allocation
* even if compaction succeeds.
* For costly orders, we require low watermark instead of min for
* compaction to proceed to increase its chances.
* ALLOC_CMA is used, as pages in CMA pageblocks are considered
Expand All @@ -2002,7 +2002,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
low_wmark_pages(zone) : min_wmark_pages(zone);
watermark += compact_gap(order);
if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
ALLOC_CMA, wmark_target))
return COMPACT_SKIPPED;

Expand All @@ -2011,12 +2011,12 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,

enum compact_result compaction_suitable(struct zone *zone, int order,
unsigned int alloc_flags,
int classzone_idx)
int highest_zoneidx)
{
enum compact_result ret;
int fragindex;

ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
zone_page_state(zone, NR_FREE_PAGES));
/*
* fragmentation index determines if allocation failures are due to
Expand Down Expand Up @@ -2057,8 +2057,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
* Make sure at least one zone would pass __compaction_suitable if we continue
* retrying the reclaim.
*/
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
ac->nodemask) {
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
ac->highest_zoneidx, ac->nodemask) {
unsigned long available;
enum compact_result compact_result;

Expand All @@ -2071,7 +2071,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
available = zone_reclaimable_pages(zone) / order;
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
compact_result = __compaction_suitable(zone, order, alloc_flags,
ac_classzone_idx(ac), available);
ac->highest_zoneidx, available);
if (compact_result != COMPACT_SKIPPED)
return true;
}
Expand Down Expand Up @@ -2102,7 +2102,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)

cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
cc->classzone_idx);
cc->highest_zoneidx);
/* Compaction is likely to fail */
if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
return ret;
Expand Down Expand Up @@ -2293,7 +2293,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)

static enum compact_result compact_zone_order(struct zone *zone, int order,
gfp_t gfp_mask, enum compact_priority prio,
unsigned int alloc_flags, int classzone_idx,
unsigned int alloc_flags, int highest_zoneidx,
struct page **capture)
{
enum compact_result ret;
Expand All @@ -2305,7 +2305,7 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
.mode = (prio == COMPACT_PRIO_ASYNC) ?
MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
.alloc_flags = alloc_flags,
.classzone_idx = classzone_idx,
.highest_zoneidx = highest_zoneidx,
.direct_compaction = true,
.whole_zone = (prio == MIN_COMPACT_PRIORITY),
.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
Expand Down Expand Up @@ -2361,8 +2361,8 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);

/* Compact each zone in the list */
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
ac->nodemask) {
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
ac->highest_zoneidx, ac->nodemask) {
enum compact_result status;

if (prio > MIN_COMPACT_PRIORITY
Expand All @@ -2372,7 +2372,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
}

status = compact_zone_order(zone, order, gfp_mask, prio,
alloc_flags, ac_classzone_idx(ac), capture);
alloc_flags, ac->highest_zoneidx, capture);
rc = max(status, rc);

/* The allocation should succeed, stop compacting */
Expand Down Expand Up @@ -2507,16 +2507,16 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
int zoneid;
struct zone *zone;
enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;

for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
zone = &pgdat->node_zones[zoneid];

if (!populated_zone(zone))
continue;

if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
classzone_idx) == COMPACT_CONTINUE)
highest_zoneidx) == COMPACT_CONTINUE)
return true;
}

Expand All @@ -2534,16 +2534,16 @@ static void kcompactd_do_work(pg_data_t *pgdat)
struct compact_control cc = {
.order = pgdat->kcompactd_max_order,
.search_order = pgdat->kcompactd_max_order,
.classzone_idx = pgdat->kcompactd_classzone_idx,
.highest_zoneidx = pgdat->kcompactd_highest_zoneidx,
.mode = MIGRATE_SYNC_LIGHT,
.ignore_skip_hint = false,
.gfp_mask = GFP_KERNEL,
};
trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
cc.classzone_idx);
cc.highest_zoneidx);
count_compact_event(KCOMPACTD_WAKE);

for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
int status;

zone = &pgdat->node_zones[zoneid];
Expand Down Expand Up @@ -2592,25 +2592,25 @@ static void kcompactd_do_work(pg_data_t *pgdat)

/*
* Regardless of success, we are done until woken up next. But remember
* the requested order/classzone_idx in case it was higher/tighter than
* our current ones
* the requested order/highest_zoneidx in case it was higher/tighter
* than our current ones
*/
if (pgdat->kcompactd_max_order <= cc.order)
pgdat->kcompactd_max_order = 0;
if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
}

void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
{
if (!order)
return;

if (pgdat->kcompactd_max_order < order)
pgdat->kcompactd_max_order = order;

if (pgdat->kcompactd_classzone_idx > classzone_idx)
pgdat->kcompactd_classzone_idx = classzone_idx;
if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx)
pgdat->kcompactd_highest_zoneidx = highest_zoneidx;

/*
* Pairs with implicit barrier in wait_event_freezable()
Expand All @@ -2623,7 +2623,7 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
return;

trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
classzone_idx);
highest_zoneidx);
wake_up_interruptible(&pgdat->kcompactd_wait);
}

Expand All @@ -2644,7 +2644,7 @@ static int kcompactd(void *p)
set_freezable();

pgdat->kcompactd_max_order = 0;
pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;

while (!kthread_should_stop()) {
unsigned long pflags;
Expand Down
Loading

0 comments on commit 97a225e

Please sign in to comment.