diff --git a/debian/changelog b/debian/changelog
index 82e7a40..829b519 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+linux (6.15.5-1) sid; urgency=medium
+
+  * New upstream stable update:
+    https://www.kernel.org/pub/linux/kernel/v6.x/ChangeLog-6.15.5
+
+ -- Konstantin Demin  Mon, 07 Jul 2025 01:40:22 +0300
+
 linux (6.15.4-1) sid; urgency=medium
 
   * New upstream stable update:
diff --git a/debian/config/amd64/config.cloud b/debian/config/amd64/config.cloud
index 5a2d7d1..dcd1762 100644
--- a/debian/config/amd64/config.cloud
+++ b/debian/config/amd64/config.cloud
@@ -131,7 +131,8 @@ CONFIG_CRYPTO_ZSTD=m
 CONFIG_ATA=m
 # CONFIG_ATA_ACPI is not set
 # CONFIG_SATA_PMP is not set
-# CONFIG_SATA_AHCI is not set
+CONFIG_SATA_AHCI=m
+CONFIG_SATA_MOBILE_LPM_POLICY=0
 # CONFIG_SATA_AHCI_PLATFORM is not set
 # CONFIG_AHCI_DWC is not set
 # CONFIG_SATA_ACARD_AHCI is not set
diff --git a/debian/config/amd64/config.vm b/debian/config/amd64/config.vm
index d47f31c..a37a7df 100644
--- a/debian/config/amd64/config.vm
+++ b/debian/config/amd64/config.vm
@@ -146,7 +146,8 @@ CONFIG_ANDROID_BINDER_DEVICES="binder"
 CONFIG_ATA=m
 # CONFIG_ATA_ACPI is not set
 # CONFIG_SATA_PMP is not set
-# CONFIG_SATA_AHCI is not set
+CONFIG_SATA_AHCI=m
+CONFIG_SATA_MOBILE_LPM_POLICY=0
 # CONFIG_SATA_AHCI_PLATFORM is not set
 # CONFIG_AHCI_DWC is not set
 # CONFIG_SATA_ACARD_AHCI is not set
diff --git a/debian/patches/bugfix/all/Revert-mmc-sdhci-Disable-SD-card-clock-before-changi.patch b/debian/patches/bugfix/all/Revert-mmc-sdhci-Disable-SD-card-clock-before-changi.patch
new file mode 100644
index 0000000..db74c89
--- /dev/null
+++ b/debian/patches/bugfix/all/Revert-mmc-sdhci-Disable-SD-card-clock-before-changi.patch
@@ -0,0 +1,45 @@
+From: Ulf Hansson
+Date: Tue, 24 Jun 2025 13:09:32 +0200
+Subject: Revert "mmc: sdhci: Disable SD card clock before changing parameters"
+Origin: https://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git/commit?id=dcc3bcfc5b50c625b475dcc25d167b6b947a6637
+Bug-Debian: https://bugs.debian.org/1108065
+
+It has turned out that trying to strictly conform to the SDHCI specification
+is causing problems. Let's revert and start over.
+
+This reverts commit fb3bbc46c94f261b6156ee863c1b06c84cf157dc.
+
+Cc: Erick Shepherd
+Cc: stable@vger.kernel.org
+Fixes: fb3bbc46c94f ("mmc: sdhci: Disable SD card clock before changing parameters")
+Suggested-by: Adrian Hunter
+Reported-by: Jonathan Liu
+Reported-by: Salvatore Bonaccorso
+Closes: https://bugs.debian.org/1108065
+Acked-by: Adrian Hunter
+Signed-off-by: Ulf Hansson
+Link: https://lore.kernel.org/r/20250624110932.176925-1-ulf.hansson@linaro.org
+---
+ drivers/mmc/host/sdhci.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2065,15 +2065,10 @@ void sdhci_set_clock(struct sdhci_host *
+ 
+ 	host->mmc->actual_clock = 0;
+ 
+-	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+-	if (clk & SDHCI_CLOCK_CARD_EN)
+-		sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN,
+-			     SDHCI_CLOCK_CONTROL);
++	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ 
+-	if (clock == 0) {
+-		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
++	if (clock == 0)
+ 		return;
+-	}
+ 
+ 	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ 	sdhci_enable_clk(host, clk);
diff --git a/debian/patches/debian/android-enable-building-ashmem-and-binder-as-modules.patch b/debian/patches/debian/android-enable-building-ashmem-and-binder-as-modules.patch
index 28a4e29..f45877b 100644
--- a/debian/patches/debian/android-enable-building-ashmem-and-binder-as-modules.patch
+++ b/debian/patches/debian/android-enable-building-ashmem-and-binder-as-modules.patch
@@ -80,7 +80,7 @@ Consequently, the ashmem part of this patch has been removed.
 	{
 --- a/mm/memory.c
 +++ b/mm/memory.c
-@@ -6589,6 +6589,7 @@ inval:
+@@ -6569,6 +6569,7 @@ inval:
 	count_vm_vma_lock_event(VMA_LOCK_ABORT);
 	return NULL;
 }
diff --git a/debian/patches/misc-openwrt/0008-mac80211-add-AQL-support-for-broadcast-packets.patch b/debian/patches/misc-openwrt/0008-mac80211-add-AQL-support-for-broadcast-packets.patch
index fabde38..b08e3fb 100644
--- a/debian/patches/misc-openwrt/0008-mac80211-add-AQL-support-for-broadcast-packets.patch
+++ b/debian/patches/misc-openwrt/0008-mac80211-add-AQL-support-for-broadcast-packets.patch
@@ -70,7 +70,7 @@ Signed-off-by: Felix Fietkau
 
 --- a/net/mac80211/ieee80211_i.h
 +++ b/net/mac80211/ieee80211_i.h
-@@ -1368,10 +1368,12 @@ struct ieee80211_local {
+@@ -1377,10 +1377,12 @@ struct ieee80211_local {
 	spinlock_t handle_wake_tx_queue_lock;
 
 	u16 airtime_flags;
diff --git a/debian/patches/patchset-pf/fixes/0006-drm-i915-snps_hdmi_pll-Fix-64-bit-divisor-truncation.patch b/debian/patches/patchset-pf/fixes/0006-drm-i915-snps_hdmi_pll-Fix-64-bit-divisor-truncation.patch
index 6cefff9..dbd0097 100644
--- a/debian/patches/patchset-pf/fixes/0006-drm-i915-snps_hdmi_pll-Fix-64-bit-divisor-truncation.patch
+++ b/debian/patches/patchset-pf/fixes/0006-drm-i915-snps_hdmi_pll-Fix-64-bit-divisor-truncation.patch
@@ -31,8 +31,8 @@ Cherry-picked-for: https://gitlab.archlinux.org/archlinux/packaging/packages/lin
 		DIV_ROUND_DOWN_ULL(curve_1_interpolated, CURVE0_MULTIPLIER)));
 
 	ana_cp_int_temp =
--		DIV_ROUND_CLOSEST_ULL(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
--				      CURVE2_MULTIPLIER);
+-		DIV64_U64_ROUND_CLOSEST(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
+-					CURVE2_MULTIPLIER);
 +	div64_u64(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
 +		  CURVE2_MULTIPLIER);
 
diff --git a/debian/patches/patchset-pf/fixes/0007-mm-compaction-use-folio-in-hugetlb-pathway.patch b/debian/patches/patchset-pf/fixes/0007-mm-compaction-use-folio-in-hugetlb-pathway.patch
new file mode 100644
index 0000000..d9c7e5c
--- /dev/null
+++ b/debian/patches/patchset-pf/fixes/0007-mm-compaction-use-folio-in-hugetlb-pathway.patch
@@ -0,0 +1,86 @@
+From 336152e6ec30b13f5617ee9b702beb5bc310c6a7 Mon Sep 17 00:00:00 2001
+From: "Vishal Moola (Oracle)"
+Date: Mon, 31 Mar 2025 19:10:25 -0700
+Subject: mm/compaction: use folio in hugetlb pathway
+
+Use a folio in the hugetlb pathway during the compaction migrate-able
+pageblock scan.
+
+This removes a call to compound_head().
+
+Link: https://lkml.kernel.org/r/20250401021025.637333-2-vishal.moola@gmail.com
+Signed-off-by: Vishal Moola (Oracle)
+Acked-by: Oscar Salvador
+Reviewed-by: Zi Yan
+Cc: Muchun Song
+Signed-off-by: Andrew Morton
+---
+ include/linux/hugetlb.h | 4 ++--
+ mm/compaction.c         | 8 ++++----
+ mm/hugetlb.c            | 3 +--
+ 3 files changed, 7 insertions(+), 8 deletions(-)
+
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -703,7 +703,7 @@ struct huge_bootmem_page {
+ 
+ bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);
+ 
+-int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
++int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
+ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
+ void wait_for_freed_hugetlb_folios(void);
+ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+@@ -1091,7 +1091,7 @@ static inline struct folio *filemap_lock
+ 	return NULL;
+ }
+ 
+-static inline int isolate_or_dissolve_huge_page(struct page *page,
++static inline int isolate_or_dissolve_huge_folio(struct folio *folio,
+ 					struct list_head *list)
+ {
+ 	return -ENOMEM;
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1001,10 +1001,11 @@ isolate_migratepages_block(struct compac
+ 			locked = NULL;
+ 		}
+ 
+-		ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
++		folio = page_folio(page);
++		ret = isolate_or_dissolve_huge_folio(folio, &cc->migratepages);
+ 
+ 		/*
+-		 * Fail isolation in case isolate_or_dissolve_huge_page()
++		 * Fail isolation in case isolate_or_dissolve_huge_folio()
+ 		 * reports an error. In case of -ENOMEM, abort right away.
+ 		 */
+ 		if (ret < 0) {
+@@ -1016,12 +1017,11 @@ isolate_migratepages_block(struct compac
+ 			goto isolate_fail;
+ 		}
+ 
+-		if (PageHuge(page)) {
++		if (folio_test_hugetlb(folio)) {
+ 			/*
+ 			 * Hugepage was successfully isolated and placed
+ 			 * on the cc->migratepages list.
+ 			 */
+-			folio = page_folio(page);
+ 			low_pfn += folio_nr_pages(folio) - 1;
+ 			goto isolate_success_no_list;
+ 		}
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2896,10 +2896,9 @@ free_new:
+ 	return ret;
+ }
+ 
+-int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
++int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
+ {
+ 	struct hstate *h;
+-	struct folio *folio = page_folio(page);
+ 	int ret = -EBUSY;
+ 
+ 	/*
diff --git a/debian/patches/patchset-pf/fixes/0007-mm-shmem-swap-fix-softlockup-with-mTHP-swapin.patch b/debian/patches/patchset-pf/fixes/0007-mm-shmem-swap-fix-softlockup-with-mTHP-swapin.patch
deleted file mode 100644
index 6463ce6..0000000
--- a/debian/patches/patchset-pf/fixes/0007-mm-shmem-swap-fix-softlockup-with-mTHP-swapin.patch
+++ /dev/null
@@ -1,190 +0,0 @@
-From 3a317593ed60909e02e059a43b2ef588f95fd457 Mon Sep 17 00:00:00 2001
-From: Kairui Song
-Date: Tue, 10 Jun 2025 01:17:51 +0800
-Subject: mm/shmem, swap: fix softlockup with mTHP swapin
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Following softlockup can be easily reproduced on my test machine with:
-
-echo always > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/enabled
-swapon /dev/zram0 # zram0 is a 48G swap device
-mkdir -p /sys/fs/cgroup/memory/test
-echo 1G > /sys/fs/cgroup/test/memory.max
-echo $BASHPID > /sys/fs/cgroup/test/cgroup.procs
-while true; do
-  dd if=/dev/zero of=/tmp/test.img bs=1M count=5120
-  cat /tmp/test.img > /dev/null
-  rm /tmp/test.img
-done
-
-Then after a while:
-watchdog: BUG: soft lockup - CPU#0 stuck for 763s! [cat:5787]
-Modules linked in: zram virtiofs
-CPU: 0 UID: 0 PID: 5787 Comm: cat Kdump: loaded Tainted: G L 6.15.0.orig-gf3021d9246bc-dirty #118 PREEMPT(voluntary)·
-Tainted: [L]=SOFTLOCKUP
-Hardware name: Red Hat KVM/RHEL-AV, BIOS 0.0.0 02/06/2015
-RIP: 0010:mpol_shared_policy_lookup+0xd/0x70
-Code: e9 b8 b4 ff ff 31 c0 c3 cc cc cc cc 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 66 0f 1f 00 0f 1f 44 00 00 41 54 55 53 <48> 8b 1f 48 85 db 74 41 4c 8d 67 08 48 89 fb 48 89 f5 4c 89 e7 e8
-RSP: 0018:ffffc90002b1fc28 EFLAGS: 00000202
-RAX: 00000000001c20ca RBX: 0000000000724e1e RCX: 0000000000000001
-RDX: ffff888118e214c8 RSI: 0000000000057d42 RDI: ffff888118e21518
-RBP: 000000000002bec8 R08: 0000000000000001 R09: 0000000000000000
-R10: 0000000000000bf4 R11: 0000000000000000 R12: 0000000000000001
-R13: 00000000001c20ca R14: 00000000001c20ca R15: 0000000000000000
-FS: 00007f03f995c740(0000) GS:ffff88a07ad9a000(0000) knlGS:0000000000000000
-CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-CR2: 00007f03f98f1000 CR3: 0000000144626004 CR4: 0000000000770eb0
-DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
-PKRU: 55555554
-Call Trace:
- <TASK>
- shmem_alloc_folio+0x31/0xc0
- shmem_swapin_folio+0x309/0xcf0
- ? filemap_get_entry+0x117/0x1e0
- ? xas_load+0xd/0xb0
- ? filemap_get_entry+0x101/0x1e0
- shmem_get_folio_gfp+0x2ed/0x5b0
- shmem_file_read_iter+0x7f/0x2e0
- vfs_read+0x252/0x330
- ksys_read+0x68/0xf0
- do_syscall_64+0x4c/0x1c0
- entry_SYSCALL_64_after_hwframe+0x76/0x7e
-RIP: 0033:0x7f03f9a46991
-Code: 00 48 8b 15 81 14 10 00 f7 d8 64 89 02 b8 ff ff ff ff eb bd e8 20 ad 01 00 f3 0f 1e fa 80 3d 35 97 10 00 00 74 13 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 4f c3 66 0f 1f 44 00 00 55 48 89 e5 48 83 ec
-RSP: 002b:00007fff3c52bd28 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
-RAX: ffffffffffffffda RBX: 0000000000040000 RCX: 00007f03f9a46991
-RDX: 0000000000040000 RSI: 00007f03f98ba000 RDI: 0000000000000003
-RBP: 00007fff3c52bd50 R08: 0000000000000000 R09: 00007f03f9b9a380
-R10: 0000000000000022 R11: 0000000000000246 R12: 0000000000040000
-R13: 00007f03f98ba000 R14: 0000000000000003 R15: 0000000000000000
- </TASK>
-
-The reason is simple, readahead brought some order 0 folio in swap cache,
-and the swapin mTHP folio being allocated is in conflict with it, so
-swapcache_prepare fails and causes shmem_swap_alloc_folio to return
--EEXIST, and shmem simply retries again and again causing this loop.
-
-Fix it by applying a similar fix for anon mTHP swapin.
-
-The performance change is very slight, time of swapin 10g zero folios
-with shmem (test for 12 times):
-Before: 2.47s
-After: 2.48s
-
-[kasong@tencent.com: add comment]
-  Link: https://lkml.kernel.org/r/20250610181645.45922-1-ryncsn@gmail.com
-Link: https://lkml.kernel.org/r/20250610181645.45922-1-ryncsn@gmail.com
-Link: https://lkml.kernel.org/r/20250609171751.36305-1-ryncsn@gmail.com
-Fixes: 1dd44c0af4fa ("mm: shmem: skip swapcache for swapin of synchronous swap device")
-Signed-off-by: Kairui Song
-Reviewed-by: Barry Song
-Acked-by: Nhat Pham
-Reviewed-by: Baolin Wang
-Cc: Baoquan He
-Cc: Chris Li
-Cc: Hugh Dickins
-Cc: Kemeng Shi
-Cc: Usama Arif
-Cc:
-Signed-off-by: Andrew Morton
----
- mm/memory.c | 20 --------------------
- mm/shmem.c  |  6 +++++-
- mm/swap.h   | 23 +++++++++++++++++++++++
- 3 files changed, 28 insertions(+), 21 deletions(-)
-
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -4225,26 +4225,6 @@ static struct folio *__alloc_swap_folio(
- }
- 
- #ifdef CONFIG_TRANSPARENT_HUGEPAGE
--static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
--{
--	struct swap_info_struct *si = swp_swap_info(entry);
--	pgoff_t offset = swp_offset(entry);
--	int i;
--
--	/*
--	 * While allocating a large folio and doing swap_read_folio, which is
--	 * the case the being faulted pte doesn't have swapcache. We need to
--	 * ensure all PTEs have no cache as well, otherwise, we might go to
--	 * swap devices while the content is in swapcache.
--	 */
--	for (i = 0; i < max_nr; i++) {
--		if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
--			return i;
--	}
--
--	return i;
--}
--
- /*
-  * Check if the PTEs within a range are contiguous swap entries
-  * and have consistent swapcache, zeromap.
---- a/mm/shmem.c
-+++ b/mm/shmem.c
-@@ -2262,6 +2262,7 @@ static int shmem_swapin_folio(struct ino
- 	folio = swap_cache_get_folio(swap, NULL, 0);
- 	order = xa_get_order(&mapping->i_pages, index);
- 	if (!folio) {
-+		int nr_pages = 1 << order;
- 		bool fallback_order0 = false;
- 
- 		/* Or update major stats only when swapin succeeds?? */
-@@ -2275,9 +2276,12 @@ static int shmem_swapin_folio(struct ino
- 		 * If uffd is active for the vma, we need per-page fault
- 		 * fidelity to maintain the uffd semantics, then fallback
- 		 * to swapin order-0 folio, as well as for zswap case.
-+		 * Any existing sub folio in the swap cache also blocks
-+		 * mTHP swapin.
- 		 */
- 		if (order > 0 && ((vma && unlikely(userfaultfd_armed(vma))) ||
--		    !zswap_never_enabled()))
-+		    !zswap_never_enabled() ||
-+		    non_swapcache_batch(swap, nr_pages) != nr_pages))
- 			fallback_order0 = true;
- 
- 		/* Skip swapcache for synchronous device. */
---- a/mm/swap.h
-+++ b/mm/swap.h
-@@ -106,6 +106,25 @@ static inline int swap_zeromap_batch(swp
- 	return find_next_bit(sis->zeromap, end, start) - start;
- }
- 
-+static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
-+{
-+	struct swap_info_struct *si = swp_swap_info(entry);
-+	pgoff_t offset = swp_offset(entry);
-+	int i;
-+
-+	/*
-+	 * While allocating a large folio and doing mTHP swapin, we need to
-+	 * ensure all entries are not cached, otherwise, the mTHP folio will
-+	 * be in conflict with the folio in swap cache.
-+	 */
-+	for (i = 0; i < max_nr; i++) {
-+		if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
-+			return i;
-+	}
-+
-+	return i;
-+}
-+
- #else /* CONFIG_SWAP */
- struct swap_iocb;
- static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
-@@ -199,6 +218,10 @@ static inline int swap_zeromap_batch(swp
- 	return 0;
- }
- 
-+static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
-+{
-+	return 0;
-+}
- #endif /* CONFIG_SWAP */
- 
- #endif /* _MM_SWAP_H */
diff --git a/debian/patches/patchset-pf/fixes/0008-mm-gup-revert-mm-gup-fix-infinite-loop-within-__get_.patch b/debian/patches/patchset-pf/fixes/0008-mm-gup-revert-mm-gup-fix-infinite-loop-within-__get_.patch
deleted file mode 100644
index 2b98b91..0000000
--- a/debian/patches/patchset-pf/fixes/0008-mm-gup-revert-mm-gup-fix-infinite-loop-within-__get_.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-From 4b247e559e4046bbbfab468e66f9d3197eaf12ec Mon Sep 17 00:00:00 2001
-From: David Hildenbrand
-Date: Wed, 11 Jun 2025 15:13:14 +0200
-Subject: mm/gup: revert "mm: gup: fix infinite loop within
- __get_longterm_locked"
-
-After commit 1aaf8c122918 ("mm: gup: fix infinite loop within
-__get_longterm_locked") we are able to longterm pin folios that are not
-supposed to get longterm pinned, simply because they temporarily have the
-LRU flag cleared (esp. temporarily isolated).
-
-For example, two __get_longterm_locked() callers can race, or
-__get_longterm_locked() can race with anything else that temporarily
-isolates folios.
-
-The introducing commit mentions the use case of a driver that uses
-vm_ops->fault to insert pages allocated through cma_alloc() into the page
-tables, assuming they can later get longterm pinned. These pages/ folios
-would never have the LRU flag set and consequently cannot get isolated.
-There is no known in-tree user making use of that so far, fortunately.
-
-To handle that in the future -- and avoid retrying forever to
-isolate/migrate them -- we will need a different mechanism for the CMA
-area *owner* to indicate that it actually already allocated the page and
-is fine with longterm pinning it. The LRU flag is not suitable for that.
-
-Probably we can lookup the relevant CMA area and query the bitmap; we only
-have have to care about some races, probably. If already allocated, we
-could just allow longterm pinning)
-
-Anyhow, let's fix the "must not be longterm pinned" problem first by
-reverting the original commit.
-
-Link: https://lkml.kernel.org/r/20250611131314.594529-1-david@redhat.com
-Fixes: 1aaf8c122918 ("mm: gup: fix infinite loop within __get_longterm_locked")
-Signed-off-by: David Hildenbrand
-Closes: https://lore.kernel.org/all/20250522092755.GA3277597@tiffany/
-Reported-by: Hyesoo Yu
-Reviewed-by: John Hubbard
-Cc: Jason Gunthorpe
-Cc: Peter Xu
-Cc: Zhaoyang Huang
-Cc: Aijun Sun
-Cc: Alistair Popple
-Cc:
-Signed-off-by: Andrew Morton
----
- mm/gup.c | 14 ++++++++++----
- 1 file changed, 10 insertions(+), 4 deletions(-)
-
---- a/mm/gup.c
-+++ b/mm/gup.c
-@@ -2320,13 +2320,13 @@ static void pofs_unpin(struct pages_or_f
- /*
-  * Returns the number of collected folios. Return value is always >= 0.
-  */
--static void collect_longterm_unpinnable_folios(
-+static unsigned long collect_longterm_unpinnable_folios(
- 		struct list_head *movable_folio_list,
- 		struct pages_or_folios *pofs)
- {
-+	unsigned long i, collected = 0;
- 	struct folio *prev_folio = NULL;
- 	bool drain_allow = true;
--	unsigned long i;
- 
- 	for (i = 0; i < pofs->nr_entries; i++) {
- 		struct folio *folio = pofs_get_folio(pofs, i);
-@@ -2338,6 +2338,8 @@ static void collect_longterm_unpinnable_
- 		if (folio_is_longterm_pinnable(folio))
- 			continue;
- 
-+		collected++;
-+
- 		if (folio_is_device_coherent(folio))
- 			continue;
- 
-@@ -2359,6 +2361,8 @@ static void collect_longterm_unpinnable_
- 				    NR_ISOLATED_ANON + folio_is_file_lru(folio),
- 				    folio_nr_pages(folio));
- 	}
-+
-+	return collected;
- }
- 
- /*
-@@ -2435,9 +2439,11 @@ static long
- check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
- {
- 	LIST_HEAD(movable_folio_list);
-+	unsigned long collected;
- 
--	collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
--	if (list_empty(&movable_folio_list))
-+	collected = collect_longterm_unpinnable_folios(&movable_folio_list,
-+						       pofs);
-+	if (!collected)
- 		return 0;
- 
- 	return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
diff --git a/debian/patches/patchset-pf/fixes/0008-mm-hugetlb-remove-unnecessary-holding-of-hugetlb_loc.patch b/debian/patches/patchset-pf/fixes/0008-mm-hugetlb-remove-unnecessary-holding-of-hugetlb_loc.patch
new file mode 100644
index 0000000..4deec12
--- /dev/null
+++ b/debian/patches/patchset-pf/fixes/0008-mm-hugetlb-remove-unnecessary-holding-of-hugetlb_loc.patch
@@ -0,0 +1,154 @@
+From 2e50b415d59dda319bb3208c5ed5234a23f307e9 Mon Sep 17 00:00:00 2001
+From: Ge Yang
+Date: Tue, 27 May 2025 11:36:50 +0800
+Subject: mm/hugetlb: remove unnecessary holding of hugetlb_lock
+
+In isolate_or_dissolve_huge_folio(), after acquiring the hugetlb_lock, it
+is only for the purpose of obtaining the correct hstate, which is then
+passed to alloc_and_dissolve_hugetlb_folio().
+
+alloc_and_dissolve_hugetlb_folio() itself also acquires the hugetlb_lock.
+We can have alloc_and_dissolve_hugetlb_folio() obtain the hstate by
+itself, so that isolate_or_dissolve_huge_folio() no longer needs to
+acquire the hugetlb_lock. In addition, we keep the folio_test_hugetlb()
+check within isolate_or_dissolve_huge_folio(). By doing so, we can avoid
+disrupting the normal path by vainly holding the hugetlb_lock.
+
+replace_free_hugepage_folios() has the same issue, and we should address
+it as well.
+
+Addresses a possible performance problem which was added by the hotfix
+113ed54ad276 ("mm/hugetlb: fix kernel NULL pointer dereference when
+replacing free hugetlb folios").
+
+Link: https://lkml.kernel.org/r/1748317010-16272-1-git-send-email-yangge1116@126.com
+Fixes: 113ed54ad276 ("mm/hugetlb: fix kernel NULL pointer dereference when replacing free hugetlb folios")
+Signed-off-by: Ge Yang
+Suggested-by: Oscar Salvador
+Reviewed-by: Muchun Song
+Cc: Baolin Wang
+Cc: Barry Song <21cnbao@gmail.com>
+Cc: David Hildenbrand
+Cc:
+Signed-off-by: Andrew Morton
+---
+ mm/hugetlb.c | 54 +++++++++++++++++-----------------------------------
+ 1 file changed, 17 insertions(+), 37 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2811,20 +2811,24 @@ void restore_reserve_on_error(struct hst
+ /*
+  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
+  * the old one
+- * @h: struct hstate old page belongs to
+  * @old_folio: Old folio to dissolve
+  * @list: List to isolate the page in case we need to
+  * Returns 0 on success, otherwise negated error.
+  */
+-static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
+-		struct folio *old_folio, struct list_head *list)
++static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
++		struct list_head *list)
+ {
+-	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
++	gfp_t gfp_mask;
++	struct hstate *h;
+ 	int nid = folio_nid(old_folio);
+ 	struct folio *new_folio = NULL;
+ 	int ret = 0;
+ 
+ retry:
++	/*
++	 * The old_folio might have been dissolved from under our feet, so make sure
++	 * to carefully check the state under the lock.
++	 */
+ 	spin_lock_irq(&hugetlb_lock);
+ 	if (!folio_test_hugetlb(old_folio)) {
+ 		/*
+@@ -2853,8 +2857,10 @@ retry:
+ 		cond_resched();
+ 		goto retry;
+ 	} else {
++		h = folio_hstate(old_folio);
+ 		if (!new_folio) {
+ 			spin_unlock_irq(&hugetlb_lock);
++			gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+ 			new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
+ 							      NULL, NULL);
+ 			if (!new_folio)
+@@ -2898,35 +2904,24 @@ free_new:
+ 
+ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
+ {
+-	struct hstate *h;
+ 	int ret = -EBUSY;
+ 
+-	/*
+-	 * The page might have been dissolved from under our feet, so make sure
+-	 * to carefully check the state under the lock.
+-	 * Return success when racing as if we dissolved the page ourselves.
+-	 */
+-	spin_lock_irq(&hugetlb_lock);
+-	if (folio_test_hugetlb(folio)) {
+-		h = folio_hstate(folio);
+-	} else {
+-		spin_unlock_irq(&hugetlb_lock);
++	/* Not to disrupt normal path by vainly holding hugetlb_lock */
++	if (!folio_test_hugetlb(folio))
+ 		return 0;
+-	}
+-	spin_unlock_irq(&hugetlb_lock);
+ 
+ 	/*
+ 	 * Fence off gigantic pages as there is a cyclic dependency between
+ 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
+ 	 * of bailing out right away without further retrying.
+ 	 */
+-	if (hstate_is_gigantic(h))
++	if (folio_order(folio) > MAX_PAGE_ORDER)
+ 		return -ENOMEM;
+ 
+ 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
+ 		ret = 0;
+ 	else if (!folio_ref_count(folio))
+-		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
++		ret = alloc_and_dissolve_hugetlb_folio(folio, list);
+ 
+ 	return ret;
+ }
+@@ -2940,7 +2935,6 @@ int isolate_or_dissolve_huge_folio(struc
+  */
+ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
+ {
+-	struct hstate *h;
+ 	struct folio *folio;
+ 	int ret = 0;
+ 
+@@ -2949,23 +2943,9 @@ int replace_free_hugepage_folios(unsigne
+ 	while (start_pfn < end_pfn) {
+ 		folio = pfn_folio(start_pfn);
+ 
+-		/*
+-		 * The folio might have been dissolved from under our feet, so make sure
+-		 * to carefully check the state under the lock.
+-		 */
+-		spin_lock_irq(&hugetlb_lock);
+-		if (folio_test_hugetlb(folio)) {
+-			h = folio_hstate(folio);
+-		} else {
+-			spin_unlock_irq(&hugetlb_lock);
+-			start_pfn++;
+-			continue;
+-		}
+-		spin_unlock_irq(&hugetlb_lock);
+-
+-		if (!folio_ref_count(folio)) {
+-			ret = alloc_and_dissolve_hugetlb_folio(h, folio,
+-							       &isolate_list);
++		/* Not to disrupt normal path by vainly holding hugetlb_lock */
++		if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
++			ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
+ 			if (ret)
+ 				break;
+ 
diff --git a/debian/patches/patchset-pf/fixes/0009-anon_inode-rework-assertions.patch b/debian/patches/patchset-pf/fixes/0009-anon_inode-rework-assertions.patch
new file mode 100644
index 0000000..2f16454
--- /dev/null
+++ b/debian/patches/patchset-pf/fixes/0009-anon_inode-rework-assertions.patch
@@ -0,0 +1,81 @@
+From 1e0bf201a90df1058f012f12adcc454d4d7c9a69 Mon Sep 17 00:00:00 2001
+From: Christian Brauner
+Date: Wed, 2 Jul 2025 11:23:55 +0200
+Subject: anon_inode: rework assertions
+
+Making anonymous inodes regular files comes with a lot of risk and
+regression potential as evidenced by a recent hiccup in io_uring. We're
+better off continuing to not have them be regular files. Since we have
+S_ANON_INODE we can port all of our assertions easily.
+
+Link: https://lore.kernel.org/20250702-work-fixes-v1-1-ff76ea589e33@kernel.org
+Fixes: cfd86ef7e8e7 ("anon_inode: use a proper mode internally")
+Acked-by: Jens Axboe
+Cc: stable@kernel.org
+Reported-by: Jens Axboe
+Signed-off-by: Christian Brauner
+---
+ fs/exec.c  | 9 +++++++--
+ fs/libfs.c | 8 +++-----
+ fs/namei.c | 2 +-
+ 3 files changed, 11 insertions(+), 8 deletions(-)
+
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -111,6 +111,9 @@ static inline void put_binfmt(struct lin
+ 
+ bool path_noexec(const struct path *path)
+ {
++	/* If it's an anonymous inode make sure that we catch any shenanigans. */
++	VFS_WARN_ON_ONCE(IS_ANON_FILE(d_inode(path->dentry)) &&
++			 !(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC));
+ 	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
+ 	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
+ }
+@@ -894,13 +897,15 @@ static struct file *do_open_execat(int f
+ 	if (IS_ERR(file))
+ 		return file;
+ 
++	if (path_noexec(&file->f_path))
++		return ERR_PTR(-EACCES);
++
+ 	/*
+ 	 * In the past the regular type check was here. It moved to may_open() in
+ 	 * 633fb6ac3980 ("exec: move S_ISREG() check earlier"). Since then it is
+ 	 * an invariant that all non-regular files error out before we get here.
+ 	 */
+-	if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
+-	    path_noexec(&file->f_path))
++	if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)))
+ 		return ERR_PTR(-EACCES);
+ 
+ 	err = exe_file_deny_write_access(file);
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -1648,12 +1648,10 @@ struct inode *alloc_anon_inode(struct su
+ 	 */
+ 	inode->i_state = I_DIRTY;
+ 	/*
+-	 * Historically anonymous inodes didn't have a type at all and
+-	 * userspace has come to rely on this. Internally they're just
+-	 * regular files but S_IFREG is masked off when reporting
+-	 * information to userspace.
++	 * Historically anonymous inodes don't have a type at all and
++	 * userspace has come to rely on this.
+ 	 */
+-	inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR;
++	inode->i_mode = S_IRUSR | S_IWUSR;
+ 	inode->i_uid = current_fsuid();
+ 	inode->i_gid = current_fsgid();
+ 	inode->i_flags |= S_PRIVATE | S_ANON_INODE;
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3464,7 +3464,7 @@ static int may_open(struct mnt_idmap *id
+ 		return -EACCES;
+ 		break;
+ 	default:
+-		VFS_BUG_ON_INODE(1, inode);
++		VFS_BUG_ON_INODE(!IS_ANON_FILE(inode), inode);
+ 	}
+ 
+ 	error = inode_permission(idmap, inode, MAY_OPEN | acc_mode);
diff --git a/debian/patches/patchset-pf/fixes/0009-mm-userfaultfd-fix-race-of-userfaultfd_move-and-swap.patch b/debian/patches/patchset-pf/fixes/0009-mm-userfaultfd-fix-race-of-userfaultfd_move-and-swap.patch
deleted file mode 100644
index 56dfa1b..0000000
--- a/debian/patches/patchset-pf/fixes/0009-mm-userfaultfd-fix-race-of-userfaultfd_move-and-swap.patch
+++ /dev/null
@@ -1,191 +0,0 @@
-From 7ebf89b788aa5b83897e99ad6e3dd6e0cb0f5030 Mon Sep 17 00:00:00 2001
-From: Kairui Song
-Date: Wed, 4 Jun 2025 23:10:38 +0800
-Subject: mm: userfaultfd: fix race of userfaultfd_move and swap cache
-
-This commit fixes two kinds of races, they may have different results:
-
-Barry reported a BUG_ON in commit c50f8e6053b0, we may see the same
-BUG_ON if the filemap lookup returned NULL and folio is added to swap
-cache after that.
-
-If another kind of race is triggered (folio changed after lookup) we
-may see RSS counter is corrupted:
-
-[ 406.893936] BUG: Bad rss-counter state mm:ffff0000c5a9ddc0
-type:MM_ANONPAGES val:-1
-[ 406.894071] BUG: Bad rss-counter state mm:ffff0000c5a9ddc0
-type:MM_SHMEMPAGES val:1
-
-Because the folio is being accounted to the wrong VMA.
-
-I'm not sure if there will be any data corruption though, seems no.
-The issues above are critical already.
-
-
-On seeing a swap entry PTE, userfaultfd_move does a lockless swap cache
-lookup, and tries to move the found folio to the faulting vma. Currently,
-it relies on checking the PTE value to ensure that the moved folio still
-belongs to the src swap entry and that no new folio has been added to the
-swap cache, which turns out to be unreliable.
-
-While working and reviewing the swap table series with Barry, following
-existing races are observed and reproduced [1]:
-
-In the example below, move_pages_pte is moving src_pte to dst_pte, where
-src_pte is a swap entry PTE holding swap entry S1, and S1 is not in the
-swap cache:
-
-CPU1                              CPU2
-userfaultfd_move
-  move_pages_pte()
-    entry = pte_to_swp_entry(orig_src_pte);
-    // Here it got entry = S1
-    ... < interrupted> ...
-
-                                  // folio A is a new allocated folio
-                                  // and get installed into src_pte
-
-                                  // src_pte now points to folio A, S1
-                                  // has swap count == 0, it can be freed
-                                  // by folio_swap_swap or swap
-                                  // allocator's reclaim.
-
-                                  // folio B is a folio in another VMA.
-
-                                  // S1 is freed, folio B can use it
-                                  // for swap out with no problem.
-    ...
-    folio = filemap_get_folio(S1)
-    // Got folio B here !!!
-    ... < interrupted again> ...
-
-                                  // Now S1 is free to be used again.
-
-                                  // Now src_pte is a swap entry PTE
-                                  // holding S1 again.
-    folio_trylock(folio)
-    move_swap_pte
-      double_pt_lock
-      is_pte_pages_stable
-      // Check passed because src_pte == S1
-      folio_move_anon_rmap(...)
-      // Moved invalid folio B here !!!
-
-The race window is very short and requires multiple collisions of multiple
-rare events, so it's very unlikely to happen, but with a deliberately
-constructed reproducer and increased time window, it can be reproduced
-easily.
-
-This can be fixed by checking if the folio returned by filemap is the
-valid swap cache folio after acquiring the folio lock.
-
-Another similar race is possible: filemap_get_folio may return NULL, but
-folio (A) could be swapped in and then swapped out again using the same
-swap entry after the lookup. In such a case, folio (A) may remain in the
-swap cache, so it must be moved too:
-
-CPU1                              CPU2
-userfaultfd_move
-  move_pages_pte()
-    entry = pte_to_swp_entry(orig_src_pte);
-    // Here it got entry = S1, and S1 is not in swap cache
-    folio = filemap_get_folio(S1)
-    // Got NULL
-    ... < interrupted again> ...
-
-
-    move_swap_pte
-      double_pt_lock
-      is_pte_pages_stable
-      // Check passed because src_pte == S1
-      folio_move_anon_rmap(...)
-      // folio A is ignored !!!
-
-Fix this by checking the swap cache again after acquiring the src_pte
-lock. And to avoid the filemap overhead, we check swap_map directly [2].
-
-The SWP_SYNCHRONOUS_IO path does make the problem more complex, but so far
-we don't need to worry about that, since folios can only be exposed to the
-swap cache in the swap out path, and this is covered in this patch by
-checking the swap cache again after acquiring the src_pte lock.
-
-Testing with a simple C program that allocates and moves several GB of
-memory did not show any observable performance change.
-
-Link: https://lkml.kernel.org/r/20250604151038.21968-1-ryncsn@gmail.com
-Fixes: adef440691ba ("userfaultfd: UFFDIO_MOVE uABI")
-Signed-off-by: Kairui Song
-Closes: https://lore.kernel.org/linux-mm/CAMgjq7B1K=6OOrK2OUZ0-tqCzi+EJt+2_K97TPGoSt=9+JwP7Q@mail.gmail.com/ [1]
-Link: https://lore.kernel.org/all/CAGsJ_4yJhJBo16XhiC-nUzSheyX-V3-nFE+tAi=8Y560K8eT=A@mail.gmail.com/ [2]
-Reviewed-by: Lokesh Gidra
-Acked-by: Peter Xu
-Reviewed-by: Suren Baghdasaryan
-Reviewed-by: Barry Song
-Reviewed-by: Chris Li
-Cc: Andrea Arcangeli
-Cc: David Hildenbrand
-Cc: Kairui Song
-Cc:
-Signed-off-by: Andrew Morton
----
- mm/userfaultfd.c | 33 +++++++++++++++++++++++++++++++--
- 1 file changed, 31 insertions(+), 2 deletions(-)
-
---- a/mm/userfaultfd.c
-+++ b/mm/userfaultfd.c
-@@ -1084,8 +1084,18 @@ static int move_swap_pte(struct mm_struc
- 			 pte_t orig_dst_pte, pte_t orig_src_pte,
- 			 pmd_t *dst_pmd, pmd_t dst_pmdval,
- 			 spinlock_t *dst_ptl, spinlock_t *src_ptl,
--			 struct folio *src_folio)
-+			 struct folio *src_folio,
-+			 struct swap_info_struct *si, swp_entry_t entry)
- {
-+	/*
-+	 * Check if the folio still belongs to the target swap entry after
-+	 * acquiring the lock. Folio can be freed in the swap cache while
-+	 * not locked.
-+	 */
-+	if (src_folio && unlikely(!folio_test_swapcache(src_folio) ||
-+				  entry.val != src_folio->swap.val))
-+		return -EAGAIN;
-+
- 	double_pt_lock(dst_ptl, src_ptl);
- 
- 	if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte,
-@@ -1102,6 +1112,25 @@ static int move_swap_pte(struct mm_struc
- 	if (src_folio) {
- 		folio_move_anon_rmap(src_folio, dst_vma);
- 		src_folio->index = linear_page_index(dst_vma, dst_addr);
-+	} else {
-+		/*
-+		 * Check if the swap entry is cached after acquiring the src_pte
-+		 * lock. Otherwise, we might miss a newly loaded swap cache folio.
-+		 *
-+		 * Check swap_map directly to minimize overhead, READ_ONCE is sufficient.
-+		 * We are trying to catch newly added swap cache, the only possible case is
-+		 * when a folio is swapped in and out again staying in swap cache, using the
-+		 * same entry before the PTE check above. The PTL is acquired and released
-+		 * twice, each time after updating the swap_map's flag. So holding
-+		 * the PTL here ensures we see the updated value. False positive is possible,
-+		 * e.g. SWP_SYNCHRONOUS_IO swapin may set the flag without touching the
-+		 * cache, or during the tiny synchronization window between swap cache and
-+		 * swap_map, but it will be gone very quickly, worst result is retry jitters.
-+		 */
-+		if (READ_ONCE(si->swap_map[swp_offset(entry)]) & SWAP_HAS_CACHE) {
-+			double_pt_unlock(dst_ptl, src_ptl);
-+			return -EAGAIN;
-+		}
- 	}
- 
- 	orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
-@@ -1412,7 +1441,7 @@ retry:
- 		}
- 		err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte,
- 				    orig_dst_pte, orig_src_pte, dst_pmd, dst_pmdval,
--				    dst_ptl, src_ptl, src_folio);
-+				    dst_ptl, src_ptl, src_folio, si, entry);
- 	}
- 
- out:
diff --git a/debian/patches/patchset-pf/fixes/0010-dm-raid-fix-variable-in-journal-device-check.patch b/debian/patches/patchset-pf/fixes/0010-dm-raid-fix-variable-in-journal-device-check.patch
deleted file mode 100644
index 106544b..0000000
--- a/debian/patches/patchset-pf/fixes/0010-dm-raid-fix-variable-in-journal-device-check.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From 222985dcb732fae554af5276f44c30d648a1d05b Mon Sep 17 00:00:00 2001
-From: Heinz Mauelshagen
-Date: Tue, 10 Jun 2025 20:53:30 +0200
-Subject: dm-raid: fix variable in journal device check
-
-Replace "rdev" with correct loop variable name "r".
-
-Signed-off-by: Heinz Mauelshagen
-Cc: stable@vger.kernel.org
-Fixes: 63c32ed4afc2 ("dm raid: add raid4/5/6 journaling support")
-Signed-off-by: Mikulas Patocka
----
- drivers/md/dm-raid.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/md/dm-raid.c
-+++ b/drivers/md/dm-raid.c
-@@ -2410,7 +2410,7 @@ static int super_init_validation(struct
- 	 */
- 	sb_retrieve_failed_devices(sb, failed_devices);
- 	rdev_for_each(r, mddev) {
--		if (test_bit(Journal, &rdev->flags) ||
-+		if (test_bit(Journal, &r->flags) ||
- 		    !r->sb_page)
- 			continue;
- 		sb2 = page_address(r->sb_page);
diff --git a/debian/patches/patchset-pf/smb/0001-cifs-all-initializations-for-tcon-should-happen-in-t.patch b/debian/patches/patchset-pf/smb/0001-cifs-all-initializations-for-tcon-should-happen-in-t.patch
new file mode 100644
index 0000000..622211c
--- /dev/null
+++ b/debian/patches/patchset-pf/smb/0001-cifs-all-initializations-for-tcon-should-happen-in-t.patch
@@ -0,0 +1,77 @@
+From 57fd039971b09ce2e6a442f822146099f72888c3 Mon Sep 17 00:00:00 2001
+From: Shyam Prasad N
+Date: Mon, 30 Jun 2025 23:09:34 +0530
+Subject: cifs: all initializations for tcon should happen in tcon_info_alloc
+
+Today, a few work structs inside tcon are initialized inside
+cifs_get_tcon and not in tcon_info_alloc. As a result, if a tcon
+is obtained from tcon_info_alloc, but not called as a part of
+cifs_get_tcon, we may trip over.
+
+Cc:
+Signed-off-by: Shyam Prasad N
+Reviewed-by: Paulo Alcantara (Red Hat)
+Signed-off-by: Steve French
+---
+ fs/smb/client/cifsproto.h | 1 +
+ fs/smb/client/connect.c   | 8 +-------
+ fs/smb/client/misc.c      | 6 ++++++
+ 3 files changed, 8 insertions(+), 7 deletions(-)
+
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -136,6 +136,7 @@ extern int SendReceiveBlockingLock(const
+ 			struct smb_hdr *out_buf,
+ 			int *bytes_returned);
+ 
++void smb2_query_server_interfaces(struct work_struct *work);
+ void
+ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ 			bool all_channels);
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -97,7 +97,7 @@ static int reconn_set_ipaddr_from_hostna
+ 	return rc;
+ }
+ 
+-static void smb2_query_server_interfaces(struct work_struct *work)
++void smb2_query_server_interfaces(struct work_struct *work)
+ {
+ 	int rc;
+ 	int xid;
+@@ -2880,20 +2880,14 @@ cifs_get_tcon(struct cifs_ses *ses, stru
+ 	tcon->max_cached_dirs = ctx->max_cached_dirs;
+ 	tcon->nodelete = ctx->nodelete;
+ 	tcon->local_lease = ctx->local_lease;
+-	INIT_LIST_HEAD(&tcon->pending_opens);
+ 	tcon->status = TID_GOOD;
+ 
+-	INIT_DELAYED_WORK(&tcon->query_interfaces,
+-			  smb2_query_server_interfaces);
+ 	if (ses->server->dialect >= SMB30_PROT_ID &&
+ 	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+ 		/* schedule query interfaces poll */
+ 		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+ 				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
+ 	}
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
+-#endif
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_add(&tcon->tcon_list, &ses->tcon_list);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -151,6 +151,12 @@ tcon_info_alloc(bool dir_leases_enabled,
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ 	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+ #endif
++	INIT_LIST_HEAD(&ret_buf->pending_opens);
++	INIT_DELAYED_WORK(&ret_buf->query_interfaces,
++			  smb2_query_server_interfaces);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	INIT_DELAYED_WORK(&ret_buf->dfs_cache_work, dfs_cache_refresh);
++#endif
+ 
+ 	return ret_buf;
+ }
diff --git a/debian/patches/series b/debian/patches/series
index c962ea4..fb904c7 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -69,6 +69,7 @@ features/x86/x86-make-x32-syscall-support-conditional.patch
 # Miscellaneous bug fixes
 bugfix/all/disable-some-marvell-phys.patch
 bugfix/all/fs-add-module_softdep-declarations-for-hard-coded-cr.patch
+bugfix/all/Revert-mmc-sdhci-Disable-SD-card-clock-before-changi.patch
 
 # Miscellaneous features
 
@@ -139,6 +140,8 @@ patchset-pf/cpuidle/0001-cpuidle-Prefer-teo-over-menu-governor.patch
 patchset-pf/kbuild/0001-ice-mark-ice_write_prof_mask_reg-as-noinline.patch
 patchset-pf/kbuild/0002-wifi-mac80211-mark-copy_mesh_setup-as-noinline.patch
 
+patchset-pf/smb/0001-cifs-all-initializations-for-tcon-should-happen-in-t.patch
+
 patchset-xanmod/binder/0001-binder-turn-into-module.patch
 
 patchset-xanmod/clearlinux/0001-sched-wait-Do-accept-in-LIFO-order-for-cache-efficie.patch
@@ -226,10 +229,9 @@ patchset-pf/fixes/0003-mm-filemap-unify-dropbehind-flag-testing-and-clearin.patc
 patchset-pf/fixes/0004-mm-khugepaged-fix-race-with-folio-split-free-using-t.patch
 patchset-pf/fixes/0005-mm-add-folio_expected_ref_count-for-reference-count-.patch
 patchset-pf/fixes/0006-drm-i915-snps_hdmi_pll-Fix-64-bit-divisor-truncation.patch
-patchset-pf/fixes/0007-mm-shmem-swap-fix-softlockup-with-mTHP-swapin.patch
-patchset-pf/fixes/0008-mm-gup-revert-mm-gup-fix-infinite-loop-within-__get_.patch
-patchset-pf/fixes/0009-mm-userfaultfd-fix-race-of-userfaultfd_move-and-swap.patch
-patchset-pf/fixes/0010-dm-raid-fix-variable-in-journal-device-check.patch
+patchset-pf/fixes/0007-mm-compaction-use-folio-in-hugetlb-pathway.patch
+patchset-pf/fixes/0008-mm-hugetlb-remove-unnecessary-holding-of-hugetlb_loc.patch
+patchset-pf/fixes/0009-anon_inode-rework-assertions.patch
 
 patchset-zen/fixes/0001-drivers-firmware-skip-simpledrm-if-nvidia-drm.modese.patch
 patchset-zen/fixes/0002-x86-cpu-Help-users-notice-when-running-old-Intel-mic.patch
diff --git a/debian/rules.d/tools/tracing/rtla/Makefile b/debian/rules.d/tools/tracing/rtla/Makefile
index 5f1daf2..dd7d8e8 100644
--- a/debian/rules.d/tools/tracing/rtla/Makefile
+++ b/debian/rules.d/tools/tracing/rtla/Makefile
@@ -3,7 +3,11 @@ include $(top_rulesdir)/Makefile.inc
 # Upstream enables LTO by default, but we don't want it
 CFLAGS += -fno-lto
 
-MAKE_RTLA := +CFLAGS='$(CFLAGS) $(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(MAKE) -C $(top_srcdir)/$(OUTDIR) O=$(CURDIR)
+# Use the version of libcpupower that we just built
+CPPFLAGS += -I$(top_srcdir)/tools/power/cpupower/lib
+LDFLAGS += -L$(CURDIR)/../../power/cpupower
+
+MAKE_RTLA := +CFLAGS='$(CFLAGS) $(CPPFLAGS)' EXTRA_CFLAGS='$(CFLAGS) $(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(MAKE) -C $(top_srcdir)/$(OUTDIR) O=$(CURDIR)
 MAKE_RTLA += LD='$(CROSS_COMPILE)ld'
 MAKE_RTLA += PKG_CONFIG='$(PKG_CONFIG)'
 MAKE_RTLA += STRIP=true
diff --git a/debian/rules.real b/debian/rules.real
index 8dd9c95..479b46b 100644
--- a/debian/rules.real
+++ b/debian/rules.real
@@ -458,11 +458,13 @@ binary_kbuild: build_kbuild
 	dh_link $(PREFIX_DIR) /usr/src/$(PACKAGE_NAME)
 	$(dh_binary_post)
 
-build_cpupower: $(STAMPS_DIR)/build-tools-headers
+build_cpupower: $(STAMPS_DIR)/build-cpupower
+$(STAMPS_DIR)/build-cpupower: $(STAMPS_DIR)/build-tools-headers
 	$(call make-tools,tools/power/cpupower)
 ifneq ($(filter i386 amd64 x32,$(DEB_HOST_ARCH)),)
 	$(call make-tools,tools/power/x86)
 endif
+	@$(stamp)
 
 binary_cpupower: DIR = $(CURDIR)/debian/cpupower-tmp
 binary_cpupower: DH_INSTALL_ARGS = --sourcedir=$(DIR)
@@ -528,7 +530,7 @@ binary_hyperv-daemons: build_hyperv-daemons
 	done
 	$(dh_binary_post)
 
-build_rtla: $(STAMPS_DIR)/build-tools-headers
+build_rtla: $(STAMPS_DIR)/build-tools-headers $(STAMPS_DIR)/build-cpupower
 	$(call make-tools,tools/tracing/rtla)
 
 binary_rtla: build_rtla