Lines matching refs:pgdat (all hits fall in mm/vmscan.c; each entry shows the source line number, the matching code, and the enclosing function, with "argument" or "local" marking hits that are parameters or local variables)

1024 static bool skip_throttle_noprogress(pg_data_t *pgdat)  in skip_throttle_noprogress()  argument
1033 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
1042 struct zone *zone = pgdat->node_zones + i; in skip_throttle_noprogress()
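
These three hits cover nearly the whole helper. A condensed sketch of the v5.17-era body this listing appears to match (abridged and re-commented, not a verbatim copy):

static bool skip_throttle_noprogress(pg_data_t *pgdat)
{
        int reclaimable = 0, write_pending = 0;
        int i;

        /* A node kswapd has given up on is likely near OOM: do not throttle. */
        if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
                return true;

        /*
         * If most reclaimable pages are dirty or under writeback, throttling
         * will happen anyway when they cycle to the LRU tail, so skip it here.
         */
        for (i = 0; i < MAX_NR_ZONES; i++) {
                struct zone *zone = pgdat->node_zones + i;

                if (!populated_zone(zone))
                        continue;

                reclaimable += zone_reclaimable_pages(zone);
                write_pending += zone_page_state_snapshot(zone,
                                                NR_ZONE_WRITE_PENDING);
        }
        if (2 * write_pending <= reclaimable)
                return true;

        return false;
}
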
1057 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) in reclaim_throttle() argument
1059 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
1086 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
1087 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
1088 node_page_state(pgdat, NR_THROTTLED_WRITTEN)); in reclaim_throttle()
1095 if (skip_throttle_noprogress(pgdat)) { in reclaim_throttle()
1117 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
1119 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), in reclaim_throttle()
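
reclaim_throttle() is where most of the per-node throttling state converges: the per-reason wait queues, the nr_writeback_throttled counter, and the nr_reclaim_start snapshot. A condensed sketch of the pgdat-relevant paths (v5.17-era; IO-worker exemptions and the default case abridged):

void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
{
        wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
        long timeout, ret;
        DEFINE_WAIT(wait);

        switch (reason) {
        case VMSCAN_THROTTLE_WRITEBACK:
                timeout = HZ/10;
                /* First throttled task snapshots the written-page counter. */
                if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
                        WRITE_ONCE(pgdat->nr_reclaim_start,
                                node_page_state(pgdat, NR_THROTTLED_WRITTEN));
                }
                break;
        case VMSCAN_THROTTLE_CONGESTED:
                fallthrough;
        case VMSCAN_THROTTLE_NOPROGRESS:
                if (skip_throttle_noprogress(pgdat)) {
                        cond_resched();
                        return;
                }
                timeout = 1;
                break;
        case VMSCAN_THROTTLE_ISOLATED:
                timeout = HZ/50;
                break;
        default:
                timeout = HZ/10;        /* abridged */
                break;
        }

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        if (reason == VMSCAN_THROTTLE_WRITEBACK)
                atomic_dec(&pgdat->nr_writeback_throttled);

        trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
                                jiffies_to_usecs(timeout - ret), reason);
}
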
1129 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, in __acct_reclaim_writeback() argument
1143 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - in __acct_reclaim_writeback()
1144 READ_ONCE(pgdat->nr_reclaim_start); in __acct_reclaim_writeback()
1147 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); in __acct_reclaim_writeback()
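
The writeback side of the same mechanism: as throttled writeback completes, the counter is compared against the snapshot taken in reclaim_throttle(), and waiters are woken once enough pages have been written. Sketch, close to the v5.17 body:

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
                              int nr_throttled)
{
        unsigned long nr_written;

        node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);
        nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
                READ_ONCE(pgdat->nr_reclaim_start);

        /* Wake throttled reclaimers once enough writeback has completed. */
        if (nr_written > SWAP_CLUSTER_MAX * nr_throttled)
                wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
}
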
1484 struct pglist_data *pgdat) in demote_page_list() argument
1486 int target_nid = next_demotion_node(pgdat->node_id); in demote_page_list()
1512 struct pglist_data *pgdat, in shrink_page_list() argument
1526 do_demote_pass = can_demote(pgdat->node_id, sc); in shrink_page_list()
1630 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_page_list()
1782 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_page_list()
1944 nr_reclaimed += demote_page_list(&demote_pages, pgdat); in shrink_page_list()
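
In the shrink_page_list() group, pgdat serves two roles: its node_id selects the demotion target, and its PGDAT_WRITEBACK/PGDAT_DIRTY bits gate how dirty and writeback pages are treated. A sketch of demote_page_list() as it stood around v5.17 (abridged):

static unsigned int demote_page_list(struct list_head *demote_pages,
                                     struct pglist_data *pgdat)
{
        int target_nid = next_demotion_node(pgdat->node_id);
        unsigned int nr_succeeded;

        if (list_empty(demote_pages))
                return 0;

        if (target_nid == NUMA_NO_NODE)
                return 0;

        /* Demotion ignores all cpuset and mempolicy settings. */
        migrate_pages(demote_pages, alloc_demote_page, NULL,
                      target_nid, MIGRATE_ASYNC, MR_DEMOTION,
                      &nr_succeeded);

        if (current_is_kswapd())
                __count_vm_events(PGDEMOTE_KSWAPD, nr_succeeded);
        else
                __count_vm_events(PGDEMOTE_DIRECT, nr_succeeded);

        return nr_succeeded;
}
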
2265 static int too_many_isolated(struct pglist_data *pgdat, int file, in too_many_isolated() argument
2278 inactive = node_page_state(pgdat, NR_INACTIVE_FILE); in too_many_isolated()
2279 isolated = node_page_state(pgdat, NR_ISOLATED_FILE); in too_many_isolated()
2281 inactive = node_page_state(pgdat, NR_INACTIVE_ANON); in too_many_isolated()
2282 isolated = node_page_state(pgdat, NR_ISOLATED_ANON); in too_many_isolated()
2297 wake_throttle_isolated(pgdat); in too_many_isolated()
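
too_many_isolated() weighs the node's NR_ISOLATED_* counters against the matching NR_INACTIVE_* counters to decide whether direct reclaimers must back off; note the wakeup once the ratio recovers. Condensed v5.17-era sketch:

static int too_many_isolated(struct pglist_data *pgdat, int file,
                             struct scan_control *sc)
{
        unsigned long inactive, isolated;
        bool too_many;

        if (current_is_kswapd())
                return 0;

        if (!writeback_throttling_sane(sc))
                return 0;

        if (file) {
                inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
                isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
        } else {
                inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
                isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
        }

        /*
         * GFP_NOIO/GFP_NOFS callers may isolate more: they must not be
         * blocked behind normal direct reclaimers, or a circular
         * deadlock is possible.
         */
        if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
                inactive >>= 3;

        too_many = isolated > inactive;

        /* Wake tasks throttled on VMSCAN_THROTTLE_ISOLATED. */
        if (!too_many)
                wake_throttle_isolated(pgdat);

        return too_many;
}
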
2400 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_inactive_list() local
2403 while (unlikely(too_many_isolated(pgdat, file, sc))) { in shrink_inactive_list()
2409 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); in shrink_inactive_list()
2423 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_inactive_list()
2435 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false); in shrink_inactive_list()
2440 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
2475 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
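
The shrink_inactive_list() hits trace the usual isolate/reclaim/putback cycle, with NR_ISOLATED_* accounting bracketing the call to shrink_page_list(). Heavily abridged excerpt (statistics and cgroup handling elided):

        struct pglist_data *pgdat = lruvec_pgdat(lruvec);

        while (unlikely(too_many_isolated(pgdat, file, sc))) {
                if (stalled)
                        return 0;
                stalled = true;
                reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);

                if (fatal_signal_pending(current))
                        return SWAP_CLUSTER_MAX;
        }

        spin_lock_irq(&lruvec->lru_lock);
        nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
                                     &nr_scanned, sc, lru);
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
        spin_unlock_irq(&lruvec->lru_lock);

        nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);

        spin_lock_irq(&lruvec->lru_lock);
        move_pages_to_lru(lruvec, &page_list);
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
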
2512 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_active_list() local
2521 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_active_list()
2583 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2588 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2731 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_scan_count() local
2742 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { in get_scan_count()
2929 static bool can_age_anon_pages(struct pglist_data *pgdat, in can_age_anon_pages() argument
2937 return can_demote(pgdat->node_id, sc); in can_age_anon_pages()
3073 static inline bool should_continue_reclaim(struct pglist_data *pgdat, in should_continue_reclaim() argument
3100 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
3119 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); in should_continue_reclaim()
3120 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in should_continue_reclaim()
3121 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); in should_continue_reclaim()
3126 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) in shrink_node_memcgs() argument
3133 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in shrink_node_memcgs()
3172 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
3183 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) in shrink_node() argument
3191 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
3268 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in shrink_node()
3269 file = node_page_state(pgdat, NR_ACTIVE_FILE) + in shrink_node()
3270 node_page_state(pgdat, NR_INACTIVE_FILE); in shrink_node()
3273 struct zone *zone = &pgdat->node_zones[z]; in shrink_node()
3285 anon = node_page_state(pgdat, NR_INACTIVE_ANON); in shrink_node()
3293 shrink_node_memcgs(pgdat, sc); in shrink_node()
3327 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
3331 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
3341 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); in shrink_node()
3365 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED); in shrink_node()
3367 if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, in shrink_node()
3378 pgdat->kswapd_failures = 0; in shrink_node()
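
shrink_node() is where the node-wide feedback bits are set. Abridged v5.17-era tail, showing how the flags, the throttling calls and the failure counter interact (sc->nr.* are per-pass reclaim statistics):

        if (current_is_kswapd()) {
                /* Every writeback page seen had already cycled the LRU:
                 * stall new writers. */
                if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
                        set_bit(PGDAT_WRITEBACK, &pgdat->flags);

                /* Dirty pages are reaching the LRU tail: allow kswapd
                 * to start writing pages during reclaim. */
                if (sc->nr.unqueued_dirty == sc->nr.file_taken)
                        set_bit(PGDAT_DIRTY, &pgdat->flags);

                /* Pages flagged for immediate reclaim but still under
                 * writeback: stall until some writeback completes. */
                if (sc->nr.immediate)
                        reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
        }

        /* ... direct reclaimers instead throttle on LRUVEC_CONGESTED,
         * via reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED) ... */

        if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, sc))
                goto again;

        /* Any progress revives a node kswapd had given up on. */
        if (reclaimable)
                pgdat->kswapd_failures = 0;
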
3413 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) in consider_reclaim_throttle() argument
3422 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; in consider_reclaim_throttle()
3440 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS); in consider_reclaim_throttle()
3543 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) in snapshot_refaults() argument
3548 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); in snapshot_refaults()
3659 static bool allow_direct_reclaim(pg_data_t *pgdat) in allow_direct_reclaim() argument
3667 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
3671 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
3689 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
3690 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
3691 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
3693 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
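
allow_direct_reclaim() sums the low-zone pfmemalloc reserves against free pages, and doubles as a kswapd kick when the watermark check fails. Sketch, close to the v5.17 body:

static bool allow_direct_reclaim(pg_data_t *pgdat)
{
        struct zone *zone;
        unsigned long pfmemalloc_reserve = 0;
        unsigned long free_pages = 0;
        int i;
        bool wmark_ok;

        if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
                return true;

        for (i = 0; i <= ZONE_NORMAL; i++) {
                zone = &pgdat->node_zones[i];
                if (!managed_zone(zone))
                        continue;
                if (!zone_reclaimable_pages(zone))
                        continue;

                pfmemalloc_reserve += min_wmark_pages(zone);
                free_pages += zone_page_state(zone, NR_FREE_PAGES);
        }

        /* No reserves (unexpected config): do not throttle. */
        if (!pfmemalloc_reserve)
                return true;

        wmark_ok = free_pages > pfmemalloc_reserve / 2;

        /* kswapd must be awake if processes are being throttled. */
        if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
                if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
                        WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);

                wake_up_interruptible(&pgdat->kswapd_wait);
        }

        return wmark_ok;
}
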
3713 pg_data_t *pgdat = NULL; in throttle_direct_reclaim() local
3752 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
3753 if (allow_direct_reclaim(pgdat)) in throttle_direct_reclaim()
3759 if (!pgdat) in throttle_direct_reclaim()
3774 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
3775 allow_direct_reclaim(pgdat), HZ); in throttle_direct_reclaim()
3779 allow_direct_reclaim(pgdat)); in throttle_direct_reclaim()
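
throttle_direct_reclaim(), the caller of the check above, picks the first usable zone's pgdat and then sleeps on its pfmemalloc_wait queue until allow_direct_reclaim() passes. The two waits differ by whether the caller can enter the filesystem. Abridged:

        /* Throttle based on the first usable node. */
        pgdat = zone->zone_pgdat;
        if (allow_direct_reclaim(pgdat))
                goto out;

        /* ... */

        if (!(gfp_mask & __GFP_FS))
                /* Caller may hold FS locks: use a bounded sleep only,
                 * to avoid deadlocking against writeback. */
                wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
                        allow_direct_reclaim(pgdat), HZ);
        else
                /* Throttle until kswapd wakes the process. */
                wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
                        allow_direct_reclaim(pgdat));
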
3836 pg_data_t *pgdat, in mem_cgroup_shrink_node() argument
3839 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in mem_cgroup_shrink_node()
3912 static void age_active_anon(struct pglist_data *pgdat, in age_active_anon() argument
3918 if (!can_age_anon_pages(pgdat, sc)) in age_active_anon()
3921 lruvec = mem_cgroup_lruvec(NULL, pgdat); in age_active_anon()
3927 lruvec = mem_cgroup_lruvec(memcg, pgdat); in age_active_anon()
3934 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) in pgdat_watermark_boosted() argument
3947 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
3962 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) in pgdat_balanced() argument
3973 zone = pgdat->node_zones + i; in pgdat_balanced()
3995 static void clear_pgdat_congested(pg_data_t *pgdat) in clear_pgdat_congested() argument
3997 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); in clear_pgdat_congested()
4000 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
4001 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
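
clear_pgdat_congested() is small enough to reproduce almost whole: once the node looks balanced, it resets the node-level pressure bits set during reclaim (v5.17-era):

static void clear_pgdat_congested(pg_data_t *pgdat)
{
        struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);

        clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
        clear_bit(PGDAT_DIRTY, &pgdat->flags);
        clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
}
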
4010 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, in prepare_kswapd_sleep() argument
4026 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
4027 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
4030 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
4033 if (pgdat_balanced(pgdat, order, highest_zoneidx)) { in prepare_kswapd_sleep()
4034 clear_pgdat_congested(pgdat); in prepare_kswapd_sleep()
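
prepare_kswapd_sleep() combines three of the per-node signals above: pending pfmemalloc waiters, the failure counter, and pgdat_balanced(). Sketch (v5.17-era, comments condensed):

static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
                                 int highest_zoneidx)
{
        /*
         * Throttled direct reclaimers are normally woken from
         * balance_pgdat(), but a race can leave them waiting:
         * wake them here before sleeping.
         */
        if (waitqueue_active(&pgdat->pfmemalloc_wait))
                wake_up_all(&pgdat->pfmemalloc_wait);

        /* Hopeless node: leave it to direct reclaim. */
        if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
                return true;

        if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
                clear_pgdat_congested(pgdat);
                return true;
        }

        return false;
}
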
4049 static bool kswapd_shrink_node(pg_data_t *pgdat, in kswapd_shrink_node() argument
4058 zone = pgdat->node_zones + z; in kswapd_shrink_node()
4069 shrink_node(pgdat, sc); in kswapd_shrink_node()
4086 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) in update_reclaim_active() argument
4092 zone = pgdat->node_zones + i; in update_reclaim_active()
4105 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) in set_reclaim_active() argument
4107 update_reclaim_active(pgdat, highest_zoneidx, true); in set_reclaim_active()
4111 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) in clear_reclaim_active() argument
4113 update_reclaim_active(pgdat, highest_zoneidx, false); in clear_reclaim_active()
4129 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) in balance_pgdat() argument
4158 zone = pgdat->node_zones + i; in balance_pgdat()
4168 set_reclaim_active(pgdat, highest_zoneidx); in balance_pgdat()
4190 zone = pgdat->node_zones + i; in balance_pgdat()
4206 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); in balance_pgdat()
4239 age_active_anon(pgdat, &sc); in balance_pgdat()
4251 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order, in balance_pgdat()
4260 if (kswapd_shrink_node(pgdat, &sc)) in balance_pgdat()
4268 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
4269 allow_direct_reclaim(pgdat)) in balance_pgdat()
4270 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
4299 pgdat->kswapd_failures++; in balance_pgdat()
4302 clear_reclaim_active(pgdat, highest_zoneidx); in balance_pgdat()
4313 zone = pgdat->node_zones + i; in balance_pgdat()
4323 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); in balance_pgdat()
4326 snapshot_refaults(NULL, pgdat); in balance_pgdat()
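
balance_pgdat() is the densest cluster of pgdat references in the file. Its cleanup tail ties several of the hits above together; abridged sketch of the v5.17-era code ('boosted' is a local tracking whether zone watermarks were boosted for this pass):

        /* No progress at any priority: record a failure so that
         * wakeup_kswapd()/prepare_kswapd_sleep() can give up on the node. */
        if (!sc.nr_reclaimed)
                pgdat->kswapd_failures++;

out:
        clear_reclaim_active(pgdat, highest_zoneidx);

        if (boosted) {
                /* ... revert the per-zone watermark boosts, then hand
                 * the rest of the work to kcompactd ... */
                wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
        }

        snapshot_refaults(NULL, pgdat);
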
4347 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, in kswapd_highest_zoneidx() argument
4350 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
4355 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, in kswapd_try_to_sleep() argument
4364 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
4373 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { in kswapd_try_to_sleep()
4380 reset_isolation_suitable(pgdat); in kswapd_try_to_sleep()
4386 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); in kswapd_try_to_sleep()
4396 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
4397 kswapd_highest_zoneidx(pgdat, in kswapd_try_to_sleep()
4400 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
4401 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
4404 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
4405 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
4413 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { in kswapd_try_to_sleep()
4414 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
4424 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); in kswapd_try_to_sleep()
4429 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); in kswapd_try_to_sleep()
4436 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
4456 pg_data_t *pgdat = (pg_data_t *)p; in kswapd() local
4458 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kswapd()
4478 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
4479 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
4480 atomic_set(&pgdat->nr_writeback_throttled, 0); in kswapd()
4484 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
4485 highest_zoneidx = kswapd_highest_zoneidx(pgdat, in kswapd()
4489 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, in kswapd()
4493 alloc_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
4494 highest_zoneidx = kswapd_highest_zoneidx(pgdat, in kswapd()
4496 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
4497 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
4518 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
4520 reclaim_order = balance_pgdat(pgdat, alloc_order, in kswapd()
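
The kswapd() thread body shows the handshake around kswapd_order and kswapd_highest_zoneidx: wakers publish them with WRITE_ONCE(), kswapd consumes and resets them. Abridged main loop (freezer, kthread_should_stop() and trace details elided):

static int kswapd(void *p)
{
        unsigned int alloc_order, reclaim_order;
        unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
        pg_data_t *pgdat = (pg_data_t *)p;
        struct task_struct *tsk = current;
        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

        if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(tsk, cpumask);

        tsk->flags |= PF_MEMALLOC | PF_KSWAPD;
        set_freezable();

        WRITE_ONCE(pgdat->kswapd_order, 0);
        WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
        atomic_set(&pgdat->nr_writeback_throttled, 0);
        for ( ; ; ) {
                alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
                highest_zoneidx = kswapd_highest_zoneidx(pgdat, highest_zoneidx);

kswapd_try_sleep:
                kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
                                    highest_zoneidx);

                /* Re-read, then reset, the wakeup hints. */
                alloc_order = READ_ONCE(pgdat->kswapd_order);
                highest_zoneidx = kswapd_highest_zoneidx(pgdat, highest_zoneidx);
                WRITE_ONCE(pgdat->kswapd_order, 0);
                WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);

                /* ... freezer handling elided ... */

                reclaim_order = balance_pgdat(pgdat, alloc_order,
                                              highest_zoneidx);
                if (reclaim_order < alloc_order)
                        goto kswapd_try_sleep;
        }
}
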
4541 pg_data_t *pgdat; in wakeup_kswapd() local
4550 pgdat = zone->zone_pgdat; in wakeup_kswapd()
4551 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
4554 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
4556 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
4557 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
4559 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
4563 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
4564 (pgdat_balanced(pgdat, order, highest_zoneidx) && in wakeup_kswapd()
4565 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { in wakeup_kswapd()
4574 wakeup_kcompactd(pgdat, order, highest_zoneidx); in wakeup_kswapd()
4578 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
4580 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
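
wakeup_kswapd() is the producer side of that handshake. Sketch, close to the v5.17 body:

void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
                   enum zone_type highest_zoneidx)
{
        pg_data_t *pgdat;
        enum zone_type curr_idx;

        if (!managed_zone(zone))
                return;

        if (!cpuset_zone_allowed(zone, gfp_flags))
                return;

        pgdat = zone->zone_pgdat;
        curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);

        if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx)
                WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);

        if (READ_ONCE(pgdat->kswapd_order) < order)
                WRITE_ONCE(pgdat->kswapd_order, order);

        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;

        /* Hopeless node, or node already balanced: don't wake kswapd,
         * but possibly let kcompactd deal with fragmentation. */
        if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
            (pgdat_balanced(pgdat, order, highest_zoneidx) &&
             !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
                if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
                        wakeup_kcompactd(pgdat, order, highest_zoneidx);
                return;
        }

        trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
                                      gfp_flags);
        wake_up_interruptible(&pgdat->kswapd_wait);
}
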
4628 pg_data_t *pgdat = NODE_DATA(nid); in kswapd_run() local
4630 if (pgdat->kswapd) in kswapd_run()
4633 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); in kswapd_run()
4634 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
4638 pgdat->kswapd = NULL; in kswapd_run()
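
kswapd_run() is where pgdat->kswapd is populated, one kthread per node. Sketch, close to the v5.17 body:

void kswapd_run(int nid)
{
        pg_data_t *pgdat = NODE_DATA(nid);

        if (pgdat->kswapd)
                return;

        pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
        if (IS_ERR(pgdat->kswapd)) {
                /* Failure at boot is fatal. */
                BUG_ON(system_state < SYSTEM_RUNNING);
                pr_err("Failed to start kswapd on node %d\n", nid);
                pgdat->kswapd = NULL;
        }
}
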
4696 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) in node_unmapped_file_pages() argument
4698 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); in node_unmapped_file_pages()
4699 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + in node_unmapped_file_pages()
4700 node_page_state(pgdat, NR_ACTIVE_FILE); in node_unmapped_file_pages()
4711 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) in node_pagecache_reclaimable() argument
4723 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); in node_pagecache_reclaimable()
4725 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); in node_pagecache_reclaimable()
4729 delta += node_page_state(pgdat, NR_FILE_DIRTY); in node_pagecache_reclaimable()
4741 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in __node_reclaim() argument
4759 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, in __node_reclaim()
4774 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) { in __node_reclaim()
4780 shrink_node(pgdat, &sc); in __node_reclaim()
4795 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in node_reclaim() argument
4809 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
4810 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <= in node_reclaim()
4811 pgdat->min_slab_pages) in node_reclaim()
4826 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
4829 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
4832 ret = __node_reclaim(pgdat, gfp_mask, order); in node_reclaim()
4833 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
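
node_reclaim() guards __node_reclaim() with the PGDAT_RECLAIM_LOCKED bit so only one task runs node reclaim per node at a time. Abridged v5.17-era sketch:

int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
        int ret;

        /* Nothing worth reclaiming on this node? */
        if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
            node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
            pgdat->min_slab_pages)
                return NODE_RECLAIM_FULL;

        /* Do not scan if the allocation should not be delayed. */
        if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
                return NODE_RECLAIM_NOSCAN;

        /* Only reclaim the local node, or nodes with no CPUs of their own. */
        if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
                return NODE_RECLAIM_NOSCAN;

        /* One reclaimer per node at a time. */
        if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
                return NODE_RECLAIM_NOSCAN;

        ret = __node_reclaim(pgdat, gfp_mask, order);
        clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

        if (!ret)
                count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

        return ret;
}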