From 9292d9753eeab6dd9492ec28b79045ab4890652a Mon Sep 17 00:00:00 2001
From: Bijan Tabatabai <bijantabatab@micron.com>
Date: Tue, 8 Jul 2025 19:59:38 -0500
Subject: mm/damon: move migration helpers from paddr to ops-common

This patch moves the damon_pa_migrate_pages function, along with its
corresponding helper functions, from paddr to ops-common.  The function
prefix of "damon_pa_" was also changed to just "damon_" accordingly.

This patch makes page migration available to vaddr schemes as well as
paddr schemes.
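
For illustration, a minimal sketch (not part of this patch) of how a
vaddr-based scheme could reuse the shared helper.  Both
damon_va_migrate_region() and damon_va_collect_folios() are hypothetical
stand-ins for a page-table walk that isolates the folios backing a
region:

    /*
     * Illustrative sketch only.  damon_va_collect_folios() is assumed to
     * walk the region's page tables and isolate the backing folios onto
     * @folio_list; damon_migrate_pages() then moves them toward
     * @target_nid and puts back whatever could not be migrated.
     */
    static unsigned long damon_va_migrate_region(struct damon_target *t,
                    struct damon_region *r, int target_nid)
    {
            LIST_HEAD(folio_list);
            unsigned long nr_migrated;

            damon_va_collect_folios(t, r, &folio_list);
            nr_migrated = damon_migrate_pages(&folio_list, target_nid);

            return nr_migrated * PAGE_SIZE;
    }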

Link: https://lkml.kernel.org/r/20250709005952.17776-9-bijan311@gmail.com
Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/damon/ops-common.c | 120 +++++++++++++++++++++++++++++++++++++++++
 mm/damon/ops-common.h |   2 +
 mm/damon/paddr.c      | 122 +-----------------------------------------
 3 files changed, 123 insertions(+), 121 deletions(-)

--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -5,6 +5,7 @@
  * Author: SeongJae Park <sj@kernel.org>
  */
 
+#include <linux/migrate.h>
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/pagemap.h>
@@ -12,6 +13,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 
+#include "../internal.h"
 #include "ops-common.h"
 
 /*
@@ -138,3 +140,121 @@ int damon_cold_score(struct damon_ctx *c
 	/* Return coldness of the region */
 	return DAMOS_MAX_SCORE - hotness;
 }
+
+static unsigned int __damon_migrate_folio_list(
+		struct list_head *migrate_folios, struct pglist_data *pgdat,
+		int target_nid)
+{
+	unsigned int nr_succeeded = 0;
+	struct migration_target_control mtc = {
+		/*
+		 * Allocate from 'node', or fail quickly and quietly.
+		 * When this happens, 'page' will likely just be discarded
+		 * instead of migrated.
+		 */
+		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
+			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
+		.nid = target_nid,
+	};
+
+	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
+		return 0;
+
+	if (list_empty(migrate_folios))
+		return 0;
+
+	/* Migration ignores all cpuset and mempolicy settings */
+	migrate_pages(migrate_folios, alloc_migration_target, NULL,
+		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
+		      &nr_succeeded);
+
+	return nr_succeeded;
+}
+
+static unsigned int damon_migrate_folio_list(struct list_head *folio_list,
+					     struct pglist_data *pgdat,
+					     int target_nid)
+{
+	unsigned int nr_migrated = 0;
+	struct folio *folio;
+	LIST_HEAD(ret_folios);
+	LIST_HEAD(migrate_folios);
+
+	while (!list_empty(folio_list)) {
+		struct folio *folio;
+
+		cond_resched();
+
+		folio = lru_to_folio(folio_list);
+		list_del(&folio->lru);
+
+		if (!folio_trylock(folio))
+			goto keep;
+
+		/* Relocate its contents to another node. */
+		list_add(&folio->lru, &migrate_folios);
+		folio_unlock(folio);
+		continue;
+keep:
+		list_add(&folio->lru, &ret_folios);
+	}
+	/* 'folio_list' is always empty here */
+
+	/* Migrate folios selected for migration */
+	nr_migrated += __damon_migrate_folio_list(
+			&migrate_folios, pgdat, target_nid);
+	/*
+	 * Folios that could not be migrated are still in @migrate_folios. Add
+	 * those back on @folio_list
+	 */
+	if (!list_empty(&migrate_folios))
+		list_splice_init(&migrate_folios, folio_list);
+
+	try_to_unmap_flush();
+
+	list_splice(&ret_folios, folio_list);
+
+	while (!list_empty(folio_list)) {
+		folio = lru_to_folio(folio_list);
+		list_del(&folio->lru);
+		folio_putback_lru(folio);
+	}
+
+	return nr_migrated;
+}
+
+unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid)
+{
+	int nid;
+	unsigned long nr_migrated = 0;
+	LIST_HEAD(node_folio_list);
+	unsigned int noreclaim_flag;
+
+	if (list_empty(folio_list))
+		return nr_migrated;
+
+	noreclaim_flag = memalloc_noreclaim_save();
+
+	nid = folio_nid(lru_to_folio(folio_list));
+	do {
+		struct folio *folio = lru_to_folio(folio_list);
+
+		if (nid == folio_nid(folio)) {
+			list_move(&folio->lru, &node_folio_list);
+			continue;
+		}
+
+		nr_migrated += damon_migrate_folio_list(&node_folio_list,
+							NODE_DATA(nid),
+							target_nid);
+		nid = folio_nid(lru_to_folio(folio_list));
+	} while (!list_empty(folio_list));
+
+	nr_migrated += damon_migrate_folio_list(&node_folio_list,
+						NODE_DATA(nid),
+						target_nid);
+
+	memalloc_noreclaim_restore(noreclaim_flag);
+
+	return nr_migrated;
+}
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -16,3 +16,5 @@ int damon_cold_score(struct damon_ctx *c
 			struct damos *s);
 int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
 			struct damos *s);
+
+unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid);
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -13,7 +13,6 @@
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/memory-tiers.h>
-#include <linux/migrate.h>
 #include <linux/mm_inline.h>
 
 #include "../internal.h"
@@ -381,125 +380,6 @@ static unsigned long damon_pa_deactivate
 			sz_filter_passed);
 }
 
-static unsigned int __damon_pa_migrate_folio_list(
-		struct list_head *migrate_folios, struct pglist_data *pgdat,
-		int target_nid)
-{
-	unsigned int nr_succeeded = 0;
-	struct migration_target_control mtc = {
-		/*
-		 * Allocate from 'node', or fail quickly and quietly.
-		 * When this happens, 'page' will likely just be discarded
-		 * instead of migrated.
-		 */
-		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
-			__GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT,
-		.nid = target_nid,
-	};
-
-	if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
-		return 0;
-
-	if (list_empty(migrate_folios))
-		return 0;
-
-	/* Migration ignores all cpuset and mempolicy settings */
-	migrate_pages(migrate_folios, alloc_migration_target, NULL,
-		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
-		      &nr_succeeded);
-
-	return nr_succeeded;
-}
-
-static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,
-						struct pglist_data *pgdat,
-						int target_nid)
-{
-	unsigned int nr_migrated = 0;
-	struct folio *folio;
-	LIST_HEAD(ret_folios);
-	LIST_HEAD(migrate_folios);
-
-	while (!list_empty(folio_list)) {
-		struct folio *folio;
-
-		cond_resched();
-
-		folio = lru_to_folio(folio_list);
-		list_del(&folio->lru);
-
-		if (!folio_trylock(folio))
-			goto keep;
-
-		/* Relocate its contents to another node. */
-		list_add(&folio->lru, &migrate_folios);
-		folio_unlock(folio);
-		continue;
-keep:
-		list_add(&folio->lru, &ret_folios);
-	}
-	/* 'folio_list' is always empty here */
-
-	/* Migrate folios selected for migration */
-	nr_migrated += __damon_pa_migrate_folio_list(
-			&migrate_folios, pgdat, target_nid);
-	/*
-	 * Folios that could not be migrated are still in @migrate_folios. Add
-	 * those back on @folio_list
-	 */
-	if (!list_empty(&migrate_folios))
-		list_splice_init(&migrate_folios, folio_list);
-
-	try_to_unmap_flush();
-
-	list_splice(&ret_folios, folio_list);
-
-	while (!list_empty(folio_list)) {
-		folio = lru_to_folio(folio_list);
-		list_del(&folio->lru);
-		folio_putback_lru(folio);
-	}
-
-	return nr_migrated;
-}
-
-static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
-					    int target_nid)
-{
-	int nid;
-	unsigned long nr_migrated = 0;
-	LIST_HEAD(node_folio_list);
-	unsigned int noreclaim_flag;
-
-	if (list_empty(folio_list))
-		return nr_migrated;
-
-	noreclaim_flag = memalloc_noreclaim_save();
-
-	nid = folio_nid(lru_to_folio(folio_list));
-	do {
-		struct folio *folio = lru_to_folio(folio_list);
-
-		if (nid == folio_nid(folio)) {
-			list_move(&folio->lru, &node_folio_list);
-			continue;
-		}
-
-		nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
-							   NODE_DATA(nid),
-							   target_nid);
-		nid = folio_nid(lru_to_folio(folio_list));
-	} while (!list_empty(folio_list));
-
-	nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
-						   NODE_DATA(nid),
-						   target_nid);
-
-	memalloc_noreclaim_restore(noreclaim_flag);
-
-	return nr_migrated;
-}
-
 static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
 		unsigned long *sz_filter_passed)
 {
@@ -527,7 +407,7 @@ put_folio:
 		addr += folio_size(folio);
 		folio_put(folio);
 	}
-	applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
+	applied = damon_migrate_pages(&folio_list, s->target_nid);
 	cond_resched();
 	s->last_applied = folio;
 	return applied * PAGE_SIZE;