101 lines
3.6 KiB
Diff
101 lines
3.6 KiB
Diff
From 4b247e559e4046bbbfab468e66f9d3197eaf12ec Mon Sep 17 00:00:00 2001
|
|
From: David Hildenbrand <david@redhat.com>
|
|
Date: Wed, 11 Jun 2025 15:13:14 +0200
|
|
Subject: mm/gup: revert "mm: gup: fix infinite loop within
|
|
__get_longterm_locked"
|
|
|
|
After commit 1aaf8c122918 ("mm: gup: fix infinite loop within
|
|
__get_longterm_locked") we are able to longterm pin folios that are not
|
|
supposed to get longterm pinned, simply because they temporarily have the
|
|
LRU flag cleared (esp. temporarily isolated).
|
|
|
|
For example, two __get_longterm_locked() callers can race, or
|
|
__get_longterm_locked() can race with anything else that temporarily
|
|
isolates folios.
|
|
|
|
The introducing commit mentions the use case of a driver that uses
|
|
vm_ops->fault to insert pages allocated through cma_alloc() into the page
|
|
tables, assuming they can later get longterm pinned. These pages/folios
|
|
would never have the LRU flag set and consequently cannot get isolated.
|
|
There is no known in-tree user making use of that so far, fortunately.
|
|
|
|
To handle that in the future -- and avoid retrying forever to
|
|
isolate/migrate them -- we will need a different mechanism for the CMA
|
|
area *owner* to indicate that it actually already allocated the page and
|
|
is fine with longterm pinning it. The LRU flag is not suitable for that.
|
|
|
|
Probably we can look up the relevant CMA area and query the bitmap; we only
|
|
have to care about some races, probably. If already allocated, we
|
|
could just allow longterm pinning.
|
|
|
|
Anyhow, let's fix the "must not be longterm pinned" problem first by
|
|
reverting the original commit.
|
|
|
|
Link: https://lkml.kernel.org/r/20250611131314.594529-1-david@redhat.com
|
|
Fixes: 1aaf8c122918 ("mm: gup: fix infinite loop within __get_longterm_locked")
|
|
Signed-off-by: David Hildenbrand <david@redhat.com>
|
|
Closes: https://lore.kernel.org/all/20250522092755.GA3277597@tiffany/
|
|
Reported-by: Hyesoo Yu <hyesoo.yu@samsung.com>
|
|
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
|
|
Cc: Jason Gunthorpe <jgg@ziepe.ca>
|
|
Cc: Peter Xu <peterx@redhat.com>
|
|
Cc: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
|
|
Cc: Aijun Sun <aijun.sun@unisoc.com>
|
|
Cc: Alistair Popple <apopple@nvidia.com>
|
|
Cc: <stable@vger.kernel.org>
|
|
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
|
|
---
|
|
mm/gup.c | 14 ++++++++++----
|
|
1 file changed, 10 insertions(+), 4 deletions(-)
|
|
|
|
--- a/mm/gup.c
|
|
+++ b/mm/gup.c
|
|
@@ -2320,13 +2320,13 @@ static void pofs_unpin(struct pages_or_f
|
|
/*
|
|
* Returns the number of collected folios. Return value is always >= 0.
|
|
*/
|
|
-static void collect_longterm_unpinnable_folios(
|
|
+static unsigned long collect_longterm_unpinnable_folios(
|
|
struct list_head *movable_folio_list,
|
|
struct pages_or_folios *pofs)
|
|
{
|
|
+ unsigned long i, collected = 0;
|
|
struct folio *prev_folio = NULL;
|
|
bool drain_allow = true;
|
|
- unsigned long i;
|
|
|
|
for (i = 0; i < pofs->nr_entries; i++) {
|
|
struct folio *folio = pofs_get_folio(pofs, i);
|
|
@@ -2338,6 +2338,8 @@ static void collect_longterm_unpinnable_
|
|
if (folio_is_longterm_pinnable(folio))
|
|
continue;
|
|
|
|
+ collected++;
|
|
+
|
|
if (folio_is_device_coherent(folio))
|
|
continue;
|
|
|
|
@@ -2359,6 +2361,8 @@ static void collect_longterm_unpinnable_
|
|
NR_ISOLATED_ANON + folio_is_file_lru(folio),
|
|
folio_nr_pages(folio));
|
|
}
|
|
+
|
|
+ return collected;
|
|
}
|
|
|
|
/*
|
|
@@ -2435,9 +2439,11 @@ static long
|
|
check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
|
|
{
|
|
LIST_HEAD(movable_folio_list);
|
|
+ unsigned long collected;
|
|
|
|
- collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
|
|
- if (list_empty(&movable_folio_list))
|
|
+ collected = collect_longterm_unpinnable_folios(&movable_folio_list,
|
|
+ pofs);
|
|
+ if (!collected)
|
|
return 0;
|
|
|
|
return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
|