Commit 76b6905
Merge tag 'mm-hotfixes-stable-2025-03-17-20-09' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc hotfixes from Andrew Morton:
 "15 hotfixes. 7 are cc:stable and the remainder address post-6.13
  issues or aren't considered necessary for -stable kernels. 13 are for
  MM and the other two are for squashfs and procfs.

  All are singletons. Please see the individual changelogs for details"

* tag 'mm-hotfixes-stable-2025-03-17-20-09' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/page_alloc: fix memory accept before watermarks gets initialized
  mm: decline to manipulate the refcount on a slab page
  memcg: drain obj stock on cpu hotplug teardown
  mm/huge_memory: drop beyond-EOF folios with the right number of refs
  selftests/mm: run_vmtests.sh: fix half_ufd_size_MB calculation
  mm: fix error handling in __filemap_get_folio() with FGP_NOWAIT
  mm: memcontrol: fix swap counter leak from offline cgroup
  mm/vma: do not register private-anon mappings with khugepaged during mmap
  squashfs: fix invalid pointer dereference in squashfs_cache_delete
  mm/migrate: fix shmem xarray update during migration
  mm/hugetlb: fix surplus pages in dissolve_free_huge_page()
  mm/damon/core: initialize damos->walk_completed in damon_new_scheme()
  mm/damon: respect core layer filters' allowance decision on ops layer
  filemap: move prefaulting out of hot write path
  proc: fix UAF in proc_get_inode()
2 parents 9130945 + 800f105 commit 76b6905

20 files changed: +132 -43 lines

fs/proc/generic.c (+9 -1)

@@ -559,10 +559,16 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
 	return p;
 }
 
-static inline void pde_set_flags(struct proc_dir_entry *pde)
+static void pde_set_flags(struct proc_dir_entry *pde)
 {
 	if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT)
 		pde->flags |= PROC_ENTRY_PERMANENT;
+	if (pde->proc_ops->proc_read_iter)
+		pde->flags |= PROC_ENTRY_proc_read_iter;
+#ifdef CONFIG_COMPAT
+	if (pde->proc_ops->proc_compat_ioctl)
+		pde->flags |= PROC_ENTRY_proc_compat_ioctl;
+#endif
 }
 
 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
@@ -626,6 +632,7 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
 	p->proc_ops = &proc_seq_ops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
+	pde_set_flags(p);
 	return proc_register(parent, p);
 }
 EXPORT_SYMBOL(proc_create_seq_private);
@@ -656,6 +663,7 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
 		return NULL;
 	p->proc_ops = &proc_single_ops;
 	p->single_show = show;
+	pde_set_flags(p);
 	return proc_register(parent, p);
 }
 EXPORT_SYMBOL(proc_create_single_data);

fs/proc/inode.c (+3 -3)

@@ -656,13 +656,13 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 
 	if (S_ISREG(inode->i_mode)) {
 		inode->i_op = de->proc_iops;
-		if (de->proc_ops->proc_read_iter)
+		if (pde_has_proc_read_iter(de))
 			inode->i_fop = &proc_iter_file_ops;
 		else
 			inode->i_fop = &proc_reg_file_ops;
 #ifdef CONFIG_COMPAT
-		if (de->proc_ops->proc_compat_ioctl) {
-			if (de->proc_ops->proc_read_iter)
+		if (pde_has_proc_compat_ioctl(de)) {
+			if (pde_has_proc_read_iter(de))
 				inode->i_fop = &proc_iter_file_ops_compat;
 			else
 				inode->i_fop = &proc_reg_file_ops_compat;

fs/proc/internal.h (+14)

@@ -85,6 +85,20 @@ static inline void pde_make_permanent(struct proc_dir_entry *pde)
 	pde->flags |= PROC_ENTRY_PERMANENT;
 }
 
+static inline bool pde_has_proc_read_iter(const struct proc_dir_entry *pde)
+{
+	return pde->flags & PROC_ENTRY_proc_read_iter;
+}
+
+static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde)
+{
+#ifdef CONFIG_COMPAT
+	return pde->flags & PROC_ENTRY_proc_compat_ioctl;
+#else
+	return false;
+#endif
+}
+
 extern struct kmem_cache *proc_dir_entry_cache;
 void pde_free(struct proc_dir_entry *pde);
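Taken together, the three fs/proc hunks implement one idea: pde_set_flags() snapshots the relevant proc_ops bits into pde->flags at registration time, and proc_get_inode() later consults only the cached pde_has_*() accessors, never the proc_ops table itself, which may live in module memory that has since been freed. Below is a minimal userspace sketch of that snapshot-at-registration pattern; struct ops, struct entry, and the ENTRY_* flags are illustrative stand-ins, not kernel API.

/*
 * Sketch: copy the bits we need out of a possibly short-lived ops
 * table at registration time, and never touch the table again.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRY_HAS_READ_ITER	(1u << 1)
#define ENTRY_HAS_COMPAT	(1u << 2)

struct ops {			/* stand-in for struct proc_ops */
	bool has_read_iter;
	bool has_compat_ioctl;
};

struct entry {			/* stand-in for struct proc_dir_entry */
	unsigned int flags;	/* cached bits, safe after ops is freed */
};

static void entry_set_flags(struct entry *e, const struct ops *ops)
{
	if (ops->has_read_iter)
		e->flags |= ENTRY_HAS_READ_ITER;
	if (ops->has_compat_ioctl)
		e->flags |= ENTRY_HAS_COMPAT;
}

int main(void)
{
	struct ops *ops = malloc(sizeof(*ops));
	struct entry e = { 0 };

	ops->has_read_iter = true;
	ops->has_compat_ioctl = false;
	entry_set_flags(&e, ops);	/* registration time: snapshot */
	free(ops);			/* "module unload": table goes away */

	/* lookup time: only the cached flags are consulted */
	printf("read_iter: %d\n", !!(e.flags & ENTRY_HAS_READ_ITER));
	return 0;
}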

fs/squashfs/cache.c (+1 -1)

@@ -198,7 +198,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
 {
 	int i, j;
 
-	if (cache == NULL)
+	if (IS_ERR(cache) || cache == NULL)
 		return;
 
 	for (i = 0; i < cache->entries; i++) {
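The one-line fix matters because the cache pointer handed to the destructor can be an ERR_PTR-encoded error rather than NULL, and the unwind path previously dereferenced it. A userspace sketch of the kernel's error-pointer convention, with ERR_PTR/IS_ERR re-implemented here purely for illustration:

/*
 * Sketch: an error is encoded in the pointer value itself, so a NULL
 * check alone does not catch it.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void cache_delete(void *cache)
{
	if (IS_ERR(cache) || cache == NULL)
		return;			/* nothing valid to tear down */
	/* ... free entries ... */
}

int main(void)
{
	cache_delete(NULL);		/* handled before the fix too */
	cache_delete(ERR_PTR(-ENOMEM));	/* would have been dereferenced */
	puts("no crash");
	return 0;
}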

include/linux/damon.h (+5)

@@ -470,6 +470,11 @@ struct damos {
 	unsigned long next_apply_sis;
 	/* informs if ongoing DAMOS walk for this scheme is finished */
 	bool walk_completed;
+	/*
+	 * If the current region in the filtering stage is allowed by core
+	 * layer-handled filters. If true, operations layer allows it, too.
+	 */
+	bool core_filters_allowed;
 /* public: */
 	struct damos_quota quota;
 	struct damos_watermarks wmarks;

include/linux/mm.h (+7 -1)

@@ -1458,7 +1458,10 @@ static inline void folio_get(struct folio *folio)
 
 static inline void get_page(struct page *page)
 {
-	folio_get(page_folio(page));
+	struct folio *folio = page_folio(page);
+	if (WARN_ON_ONCE(folio_test_slab(folio)))
+		return;
+	folio_get(folio);
 }
 
 static inline __must_check bool try_get_page(struct page *page)
@@ -1552,6 +1555,9 @@ static inline void put_page(struct page *page)
 {
 	struct folio *folio = page_folio(page);
 
+	if (folio_test_slab(folio))
+		return;
+
 	/*
 	 * For some devmap managed pages we need to catch refcount transition
 	 * from 2 to 1:
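These guards (and the open-coded variant in lib/iov_iter.c below) ensure that slab pages, whose refcount storage is reused for allocator bookkeeping, are never get/put through the folio path. A compact userspace sketch of the same warn-once-and-bail pattern; struct object and this WARN_ON_ONCE() are stand-ins, and the macro uses a GCC/Clang statement expression much as the kernel's does:

/*
 * Sketch: refuse to touch the refcount of an object whose type reuses
 * that field for other bookkeeping, warning only on the first hit.
 */
#include <stdbool.h>
#include <stdio.h>

#define WARN_ON_ONCE(cond) ({						\
	static bool __warned;						\
	bool __c = (cond);						\
	if (__c && !__warned) {						\
		__warned = true;					\
		fprintf(stderr, "warning: %s\n", #cond);		\
	}								\
	__c;								\
})

struct object {
	bool is_slab;		/* stand-in for folio_test_slab() */
	int refcount;
};

static void object_get(struct object *o)
{
	if (WARN_ON_ONCE(o->is_slab))
		return;		/* never manipulate a slab "refcount" */
	o->refcount++;
}

int main(void)
{
	struct object slab = { .is_slab = true, .refcount = 0 };

	object_get(&slab);	/* warns once, leaves refcount alone */
	object_get(&slab);	/* silent, still a no-op */
	printf("refcount = %d\n", slab.refcount);
	return 0;
}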

include/linux/proc_fs.h (+5 -2)

@@ -20,10 +20,13 @@ enum {
 	 * If in doubt, ignore this flag.
 	 */
 #ifdef MODULE
-	PROC_ENTRY_PERMANENT = 0U,
+	PROC_ENTRY_PERMANENT		= 0U,
 #else
-	PROC_ENTRY_PERMANENT = 1U << 0,
+	PROC_ENTRY_PERMANENT		= 1U << 0,
 #endif
+
+	PROC_ENTRY_proc_read_iter	= 1U << 1,
+	PROC_ENTRY_proc_compat_ioctl	= 1U << 2,
 };
 
 struct proc_ops {

include/linux/swap_cgroup.h (+2 -2)

@@ -6,7 +6,7 @@
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
 
-extern void swap_cgroup_record(struct folio *folio, swp_entry_t ent);
+extern void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent);
 extern unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents);
 extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
 extern int swap_cgroup_swapon(int type, unsigned long max_pages);
@@ -15,7 +15,7 @@ extern void swap_cgroup_swapoff(int type);
 #else
 
 static inline
-void swap_cgroup_record(struct folio *folio, swp_entry_t ent)
+void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent)
 {
 }
 

lib/iov_iter.c (+6 -2)

@@ -1190,8 +1190,12 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
 	if (!n)
 		return -ENOMEM;
 	p = *pages;
-	for (int k = 0; k < n; k++)
-		get_page(p[k] = page + k);
+	for (int k = 0; k < n; k++) {
+		struct folio *folio = page_folio(page);
+		p[k] = page + k;
+		if (!folio_test_slab(folio))
+			folio_get(folio);
+	}
 	maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
 	i->count -= maxsize;
 	i->iov_offset += maxsize;

mm/damon/core.c (+6 -1)

@@ -373,6 +373,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
 	 * or damon_attrs are updated.
 	 */
 	scheme->next_apply_sis = 0;
+	scheme->walk_completed = false;
 	INIT_LIST_HEAD(&scheme->filters);
 	scheme->stat = (struct damos_stat){};
 	INIT_LIST_HEAD(&scheme->list);
@@ -1429,9 +1430,13 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
 {
 	struct damos_filter *filter;
 
+	s->core_filters_allowed = false;
 	damos_for_each_filter(filter, s) {
-		if (damos_filter_match(ctx, t, r, filter))
+		if (damos_filter_match(ctx, t, r, filter)) {
+			if (filter->allow)
+				s->core_filters_allowed = true;
 			return !filter->allow;
+		}
 	}
 	return false;
 }

mm/damon/paddr.c (+3)

@@ -236,6 +236,9 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
 {
 	struct damos_filter *filter;
 
+	if (scheme->core_filters_allowed)
+		return false;
+
 	damos_for_each_filter(filter, scheme) {
 		if (damos_pa_filter_match(filter, folio))
 			return !filter->allow;
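Together, the damon.h, core.c, and paddr.c hunks make the core layer's explicit "allow" decision binding on the operations layer: once a core-layer filter allows a region, damos_pa_filter_out() returns early instead of re-filtering the folio. A small sketch of that two-stage contract, with illustrative types in place of the damos structures:

/*
 * Sketch: stage-two (ops) filtering is skipped entirely when stage one
 * (core) has already allowed the region.
 */
#include <stdbool.h>
#include <stdio.h>

struct filter {
	bool matches;	/* stand-in for damos_filter_match() */
	bool allow;
};

/* core layer: returns true if the region is filtered out */
static bool core_filter_out(struct filter *f, int n, bool *core_allowed)
{
	*core_allowed = false;
	for (int i = 0; i < n; i++) {
		if (f[i].matches) {
			if (f[i].allow)
				*core_allowed = true;
			return !f[i].allow;
		}
	}
	return false;
}

/* ops layer: honors the core layer's explicit allow */
static bool ops_filter_out(struct filter *f, int n, bool core_allowed)
{
	if (core_allowed)
		return false;	/* core already said yes; don't veto */
	for (int i = 0; i < n; i++)
		if (f[i].matches)
			return !f[i].allow;
	return false;
}

int main(void)
{
	struct filter core[] = { { .matches = true, .allow = true } };
	struct filter ops[]  = { { .matches = true, .allow = false } };
	bool core_allowed;

	if (!core_filter_out(core, 1, &core_allowed))
		printf("filtered by ops layer: %d\n",
		       ops_filter_out(ops, 1, core_allowed));	/* 0 */
	return 0;
}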

mm/filemap.c (+28 -12)

@@ -1985,8 +1985,19 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 
 		if (err == -EEXIST)
 			goto repeat;
-		if (err)
+		if (err) {
+			/*
+			 * When NOWAIT I/O fails to allocate folios this could
+			 * be due to a nonblocking memory allocation and not
+			 * because the system actually is out of memory.
+			 * Return -EAGAIN so that the caller retries in a
+			 * blocking fashion instead of propagating -ENOMEM
+			 * to the application.
+			 */
+			if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM)
+				err = -EAGAIN;
 			return ERR_PTR(err);
+		}
 		/*
 		 * filemap_add_folio locks the page, and for mmap
 		 * we expect an unlocked page.
@@ -4083,17 +4094,6 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 		bytes = min(chunk - offset, bytes);
 		balance_dirty_pages_ratelimited(mapping);
 
-		/*
-		 * Bring in the user page that we will copy from _first_.
-		 * Otherwise there's a nasty deadlock on copying from the
-		 * same page as we're writing to, without it being marked
-		 * up-to-date.
-		 */
-		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
-			status = -EFAULT;
-			break;
-		}
-
 		if (fatal_signal_pending(current)) {
 			status = -EINTR;
 			break;
@@ -4111,6 +4111,12 @@
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_folio(folio);
 
+		/*
+		 * Faults here on mmap()s can recurse into arbitrary
+		 * filesystem code. Lots of locks are held that can
+		 * deadlock. Use an atomic copy to avoid deadlocking
+		 * in page fault handling.
+		 */
 		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
 		flush_dcache_folio(folio);
 
@@ -4136,6 +4142,16 @@
 			bytes = copied;
 			goto retry;
 		}
+
+		/*
+		 * 'folio' is now unlocked and faults on it can be
+		 * handled. Ensure forward progress by trying to
+		 * fault it in now.
+		 */
+		if (fault_in_iov_iter_readable(i, bytes) == bytes) {
+			status = -EFAULT;
+			break;
+		}
 	} else {
 		pos += status;
 		written += status;
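The generic_perform_write() change turns the old prefault-then-copy sequence into an optimistic one: attempt the atomic copy first, and only fault the source pages in (after the folio is unlocked) when the copy comes up short. A toy model of that loop; copy_atomic() and fault_in() here are stand-ins that simulate a single unfaulted source page:

/*
 * Sketch: try the copy first, fall back to faulting the source in only
 * when the copy makes no progress, keeping the prefault off the hot path.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* pretend the first attempt hits an unfaulted page and copies 0 bytes */
static size_t copy_atomic(char *dst, const char *src, size_t n, int *faulted)
{
	if (!*faulted)
		return 0;
	memcpy(dst, src, n);
	return n;
}

static void fault_in(int *faulted)
{
	*faulted = 1;	/* stand-in for fault_in_iov_iter_readable() */
}

int main(void)
{
	char dst[8], src[8] = "payload";
	int faulted = 0;
	size_t copied;

	while ((copied = copy_atomic(dst, src, sizeof(src), &faulted)) == 0)
		fault_in(&faulted);	/* slow path: only on short copy */

	printf("copied %zu bytes: %s\n", copied, dst);
	return 0;
}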

mm/huge_memory.c (+1 -1)

@@ -3304,7 +3304,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 			folio_account_cleaned(tail,
 					inode_to_wb(folio->mapping->host));
 			__filemap_remove_folio(tail, NULL);
-			folio_put(tail);
+			folio_put_refs(tail, folio_nr_pages(tail));
 		} else if (!folio_test_anon(folio)) {
 			__xa_store(&folio->mapping->i_pages, tail->index,
 					tail, 0);
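Per the changelog, an after-split tail folio that falls beyond EOF can itself still be large, and it carries one page-cache reference per base page; dropping a single reference therefore leaked it. A counter-only sketch of why folio_put_refs() with folio_nr_pages() drops the right amount (struct folio here is a toy, not the kernel's):

/*
 * Sketch: a large truncated tail holds nr_pages cache references, so
 * releasing it must drop all of them, not just one.
 */
#include <stdio.h>

struct folio { int refs; int nr_pages; };

static void folio_put_refs(struct folio *f, int refs)
{
	f->refs -= refs;
	if (f->refs == 0)
		printf("freed folio of %d pages\n", f->nr_pages);
}

int main(void)
{
	/* order-2 tail beyond EOF: 4 cache refs were taken for it */
	struct folio tail = { .refs = 4, .nr_pages = 4 };

	folio_put_refs(&tail, tail.nr_pages);	/* drops all cache refs */
	return 0;
}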

mm/hugetlb.c (+6 -2)

@@ -2135,6 +2135,8 @@ int dissolve_free_hugetlb_folio(struct folio *folio)
 
 	if (!folio_ref_count(folio)) {
 		struct hstate *h = folio_hstate(folio);
+		bool adjust_surplus = false;
+
 		if (!available_huge_pages(h))
 			goto out;
 
@@ -2157,7 +2159,9 @@
 			goto retry;
 		}
 
-		remove_hugetlb_folio(h, folio, false);
+		if (h->surplus_huge_pages_node[folio_nid(folio)])
+			adjust_surplus = true;
+		remove_hugetlb_folio(h, folio, adjust_surplus);
 		h->max_huge_pages--;
 		spin_unlock_irq(&hugetlb_lock);
 
@@ -2177,7 +2181,7 @@
 		rc = hugetlb_vmemmap_restore_folio(h, folio);
 		if (rc) {
 			spin_lock_irq(&hugetlb_lock);
-			add_hugetlb_folio(h, folio, false);
+			add_hugetlb_folio(h, folio, adjust_surplus);
 			h->max_huge_pages++;
 			goto out;
 		}
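The hugetlb fix makes the surplus-accounting decision once, before remove_hugetlb_folio(), and replays exactly the same decision on the vmemmap-restore error path, so a failed dissolve no longer skews the surplus counters. A toy sketch of that record-once, replay-on-rollback symmetry, with hypothetical pool fields:

/*
 * Sketch: the adjustment decided at removal time is reused verbatim on
 * the error rollback, keeping the two paths symmetric.
 */
#include <stdbool.h>
#include <stdio.h>

struct pool { long pages; long surplus; };

static void remove_page(struct pool *p, bool adjust_surplus)
{
	p->pages--;
	if (adjust_surplus)
		p->surplus--;
}

static void add_page(struct pool *p, bool adjust_surplus)
{
	p->pages++;
	if (adjust_surplus)
		p->surplus++;
}

int main(void)
{
	struct pool p = { .pages = 4, .surplus = 1 };
	bool adjust_surplus = p.surplus > 0;	/* decided once */

	remove_page(&p, adjust_surplus);
	/* ... vmemmap restore fails ... */
	add_page(&p, adjust_surplus);		/* same decision on rollback */
	printf("pages=%ld surplus=%ld\n", p.pages, p.surplus);
	return 0;
}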

mm/memcontrol.c (+11 -2)

@@ -1921,9 +1921,18 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
 static int memcg_hotplug_cpu_dead(unsigned int cpu)
 {
 	struct memcg_stock_pcp *stock;
+	struct obj_cgroup *old;
+	unsigned long flags;
 
 	stock = &per_cpu(memcg_stock, cpu);
+
+	/* drain_obj_stock requires stock_lock */
+	local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	old = drain_obj_stock(stock);
+	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+
 	drain_stock(stock);
+	obj_cgroup_put(old);
 
 	return 0;
 }
@@ -4993,7 +5002,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 	mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
 
-	swap_cgroup_record(folio, entry);
+	swap_cgroup_record(folio, mem_cgroup_id(swap_memcg), entry);
 
 	folio_unqueue_deferred_split(folio);
 	folio->memcg_data = 0;
@@ -5055,7 +5064,7 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
 	mem_cgroup_id_get_many(memcg, nr_pages - 1);
 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
 
-	swap_cgroup_record(folio, entry);
+	swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
 
 	return 0;
 }
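The hotplug hunk follows a common kernel ordering rule: flush the dead CPU's cached objcg stock while holding stock_lock, but drop the obj_cgroup reference only after unlocking, since the put may free the cgroup and must not run under the lock. The two swap_cgroup_record() hunks pass the resolved memcg id down explicitly, matching the new prototype in swap_cgroup.h, rather than having the recorder re-derive it from a folio whose cgroup may already be offline. A userspace sketch of the lock/put ordering, with a pthread mutex standing in for the local lock and all structures illustrative:

/*
 * Sketch: drain under the lock, drop the reference outside it.
 */
#include <pthread.h>
#include <stdio.h>

struct objcg { int refs; };
struct stock { struct objcg *cached; long bytes; };

static pthread_mutex_t stock_lock = PTHREAD_MUTEX_INITIALIZER;

static struct objcg *drain_obj_stock(struct stock *s)
{
	struct objcg *old = s->cached;
	s->cached = NULL;	/* flush cached bytes, detach the objcg */
	s->bytes = 0;
	return old;
}

static void objcg_put(struct objcg *o)
{
	if (o)
		o->refs--;	/* may free; must not run under stock_lock */
}

int main(void)
{
	struct objcg cg = { .refs = 1 };
	struct stock s = { .cached = &cg, .bytes = 4096 };
	struct objcg *old;

	pthread_mutex_lock(&stock_lock);
	old = drain_obj_stock(&s);
	pthread_mutex_unlock(&stock_lock);
	objcg_put(old);		/* reference dropped outside the lock */

	printf("refs=%d bytes=%ld\n", cg.refs, s.bytes);
	return 0;
}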

mm/migrate.c (+4 -6)

@@ -518,15 +518,13 @@ static int __folio_migrate_mapping(struct address_space *mapping,
 	if (folio_test_anon(folio) && folio_test_large(folio))
 		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
 	folio_ref_add(newfolio, nr); /* add cache reference */
-	if (folio_test_swapbacked(folio)) {
+	if (folio_test_swapbacked(folio))
 		__folio_set_swapbacked(newfolio);
-		if (folio_test_swapcache(folio)) {
-			folio_set_swapcache(newfolio);
-			newfolio->private = folio_get_private(folio);
-		}
+	if (folio_test_swapcache(folio)) {
+		folio_set_swapcache(newfolio);
+		newfolio->private = folio_get_private(folio);
 		entries = nr;
 	} else {
-		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
 		entries = 1;
 	}
 
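The migrate.c fix hinges on how large folios are stored: the swap cache holds one xarray slot per base page, while a file or shmem mapping holds a single multi-index slot, so a swapbacked-but-not-swapcache shmem folio must update only one entry rather than nr. A one-function sketch of that distinction (numbers illustrative):

/*
 * Sketch: the number of mapping slots to rewrite during migration
 * depends on where the folio lives, not on whether it is swapbacked.
 */
#include <stdio.h>

static int entries_to_update(int nr_pages, int in_swapcache)
{
	/* swap cache: nr separate slots; file/shmem: one multi-index slot */
	return in_swapcache ? nr_pages : 1;
}

int main(void)
{
	printf("shmem THP (16 pages):     %d slot(s)\n",
	       entries_to_update(16, 0));
	printf("swapcache THP (16 pages): %d slot(s)\n",
	       entries_to_update(16, 1));
	return 0;
}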

mm/page_alloc.c (+12 -2)

@@ -7004,7 +7004,7 @@ static inline bool has_unaccepted_memory(void)
 
 static bool cond_accept_memory(struct zone *zone, unsigned int order)
 {
-	long to_accept;
+	long to_accept, wmark;
 	bool ret = false;
 
 	if (!has_unaccepted_memory())
@@ -7013,8 +7013,18 @@
 	if (list_empty(&zone->unaccepted_pages))
 		return false;
 
+	wmark = promo_wmark_pages(zone);
+
+	/*
+	 * Watermarks have not been initialized yet.
+	 *
+	 * Accepting one MAX_ORDER page to ensure progress.
+	 */
+	if (!wmark)
+		return try_to_accept_memory_one(zone);
+
 	/* How much to accept to get to promo watermark? */
-	to_accept = promo_wmark_pages(zone) -
+	to_accept = wmark -
 		(zone_page_state(zone, NR_FREE_PAGES) -
 		 __zone_watermark_unusable_free(zone, order, 0) -
 		 zone_page_state(zone, NR_UNACCEPTED));
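The page_alloc fix reads the promo watermark once and special-cases the early-boot window where watermarks are still zero, when the deficit arithmetic would wrongly conclude that nothing needs accepting. A small sketch of the decision logic with made-up page counts; accept_one_chunk() stands in for try_to_accept_memory_one():

/*
 * Sketch: with an uninitialized (zero) watermark, accept a fixed chunk
 * to guarantee progress instead of trusting the deficit calculation.
 */
#include <stdio.h>

static long accept_one_chunk(void)
{
	return 512;	/* stand-in for try_to_accept_memory_one() */
}

static long pages_to_accept(long wmark, long free, long unusable,
			    long unaccepted)
{
	if (!wmark)	/* watermarks not initialized yet */
		return accept_one_chunk();
	return wmark - (free - unusable - unaccepted);
}

int main(void)
{
	/* early boot: wmark == 0 would have made the deficit negative */
	printf("early boot: %ld\n", pages_to_accept(0, 1024, 0, 256));
	/* normal case: accept only up to the promo watermark */
	printf("runtime:    %ld\n", pages_to_accept(2048, 1024, 64, 256));
	return 0;
}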
