Commit 4f82870

Merge tag 'mm-hotfixes-stable-2023-10-24-09-40' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "20 hotfixes. 12 are cc:stable and the remainder address post-6.5
  issues or aren't considered necessary for earlier kernel versions"

* tag 'mm-hotfixes-stable-2023-10-24-09-40' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  maple_tree: add GFP_KERNEL to allocations in mas_expected_entries()
  selftests/mm: include mman header to access MREMAP_DONTUNMAP identifier
  mailmap: correct email aliasing for Oleksij Rempel
  mailmap: map Bartosz's old address to the current one
  mm/damon/sysfs: check DAMOS regions update progress from before_terminate()
  MAINTAINERS: Ondrej has moved
  kasan: disable kasan_non_canonical_hook() for HW tags
  kasan: print the original fault addr when access invalid shadow
  hugetlbfs: close race between MADV_DONTNEED and page fault
  hugetlbfs: extend hugetlb_vma_lock to private VMAs
  hugetlbfs: clear resv_map pointer if mmap fails
  mm: zswap: fix pool refcount bug around shrink_worker()
  mm/migrate: fix do_pages_move for compat pointers
  riscv: fix set_huge_pte_at() for NAPOT mappings when a swap entry is set
  riscv: handle VM_FAULT_[HWPOISON|HWPOISON_LARGE] faults instead of panicking
  mmap: fix error paths with dup_anon_vma()
  mmap: fix vma_iterator in error path of vma_merge()
  mm: fix vm_brk_flags() to not bail out while holding lock
  mm/mempolicy: fix set_mempolicy_home_node() previous VMA pointer
  mm/page_alloc: correct start page when guard page debug is enabled
2 parents: d88520a + 099d743

19 files changed: +255 / -73 lines


.mailmap

+4-2
@@ -87,6 +87,7 @@ Baolin Wang <[email protected]> <[email protected]>
+Bartosz Golaszewski <[email protected]> <[email protected]>
 Ben Gardner <[email protected]>
@@ -450,9 +451,10 @@ Oleksandr Natalenko <[email protected]> <[email protected]>
-Oleksij Rempel <linux@rempel-privat.de> <[email protected]>
+Oleksij Rempel <[email protected]>
+Oleksij Rempel <o.rempel@pengutronix.de> <[email protected]>
 Paolo 'Blaisorblade' Giarrusso <[email protected]>

MAINTAINERS

+1-1
@@ -6766,7 +6766,7 @@ F:	drivers/gpu/drm/panel/panel-sitronix-st7701.c
 DRM DRIVER FOR SITRONIX ST7703 PANELS
 M:	Guido Günther <[email protected]>
 R:	Purism Kernel Team <[email protected]>
-R:	Ondrej Jirman <[email protected]>
+R:	Ondrej Jirman <[email protected]>
 S:	Maintained
 F:	Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
 F:	drivers/gpu/drm/panel/panel-sitronix-st7703.c

arch/riscv/mm/fault.c

+1-1
@@ -72,7 +72,7 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
 		}
 		pagefault_out_of_memory();
 		return;
-	} else if (fault & VM_FAULT_SIGBUS) {
+	} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
 		/* Kernel mode? Handle exceptions or die */
 		if (!user_mode(regs)) {
 			no_context(regs, addr);
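
The effect of this one-liner is that memory-poisoning faults are folded into the same mask test as SIGBUS, so they reach the signal-delivery path instead of the catch-all panic path named in the commit subject. A rough standalone illustration of that flag-grouping pattern (the flag values below are invented, not the kernel's vm_fault_t bits):

/* Illustrative only: these flag values are made up, not vm_fault_t bits. */
#include <stdio.h>

#define FAULT_SIGBUS          0x01
#define FAULT_HWPOISON        0x02
#define FAULT_HWPOISON_LARGE  0x04

static void handle_fault(unsigned int fault)
{
        /* One mask test covers every fault that should end in SIGBUS delivery. */
        if (fault & (FAULT_SIGBUS | FAULT_HWPOISON | FAULT_HWPOISON_LARGE))
                printf("fault 0x%x: deliver SIGBUS to the task\n", fault);
        else
                printf("fault 0x%x: unhandled fault type\n", fault);
}

int main(void)
{
        handle_fault(FAULT_SIGBUS);
        handle_fault(FAULT_HWPOISON);   /* handled now rather than falling through */
        return 0;
}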

arch/riscv/mm/hugetlbpage.c

+13-6
@@ -183,15 +183,22 @@ void set_huge_pte_at(struct mm_struct *mm,
 			pte_t pte,
 			unsigned long sz)
 {
+	unsigned long hugepage_shift;
 	int i, pte_num;
 
-	if (!pte_napot(pte)) {
-		set_pte_at(mm, addr, ptep, pte);
-		return;
-	}
+	if (sz >= PGDIR_SIZE)
+		hugepage_shift = PGDIR_SHIFT;
+	else if (sz >= P4D_SIZE)
+		hugepage_shift = P4D_SHIFT;
+	else if (sz >= PUD_SIZE)
+		hugepage_shift = PUD_SHIFT;
+	else if (sz >= PMD_SIZE)
+		hugepage_shift = PMD_SHIFT;
+	else
+		hugepage_shift = PAGE_SHIFT;
 
-	pte_num = napot_pte_num(napot_cont_order(pte));
-	for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
+	pte_num = sz >> hugepage_shift;
+	for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
 		set_pte_at(mm, addr, ptep, pte);
 }
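
The point of the rewrite is that the number of PTEs and the stride are now derived from the mapping size sz rather than from the pte value, which may be a swap entry that pte_napot()/napot_cont_order() cannot decode. A minimal standalone sketch of that size-to-shift arithmetic, using made-up shift values rather than the kernel's PGDIR/P4D/PUD/PMD macros:

#include <stdio.h>

/* Illustrative shifts only; the kernel uses PGDIR/P4D/PUD/PMD/PAGE_SHIFT. */
#define EX_PAGE_SHIFT 12
#define EX_PMD_SHIFT  21
#define EX_PUD_SHIFT  30

static unsigned int shift_for_size(unsigned long sz)
{
        if (sz >= (1UL << EX_PUD_SHIFT))
                return EX_PUD_SHIFT;
        if (sz >= (1UL << EX_PMD_SHIFT))
                return EX_PMD_SHIFT;
        return EX_PAGE_SHIFT;
}

int main(void)
{
        unsigned long sz = 64 * 1024;           /* e.g. a 64 KiB NAPOT-style mapping */
        unsigned int shift = shift_for_size(sz);

        /* Same arithmetic as the fix: entry count and stride come from sz alone. */
        printf("size %lu -> shift %u, %lu ptes, step %lu bytes\n",
               sz, shift, sz >> shift, 1UL << shift);
        return 0;
}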

include/linux/hugetlb.h

+39-2
@@ -60,6 +60,7 @@ struct resv_map {
 	long adds_in_progress;
 	struct list_head region_cache;
 	long region_cache_count;
+	struct rw_semaphore rw_sema;
 #ifdef CONFIG_CGROUP_HUGETLB
 	/*
 	 * On private mappings, the counter to uncharge reservations is stored
@@ -138,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *,
 			  zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma,
 			  unsigned long start, unsigned long end,
 			  struct page *ref_page, zap_flags_t zap_flags);
@@ -245,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 				unsigned long *start, unsigned long *end);
 
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+				unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+			      struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+				     unsigned long *start, unsigned long *end)
+{
+	if (is_vm_hugetlb_page(vma))
+		__hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+				   struct zap_details *details)
+{
+	if (is_vm_hugetlb_page(vma))
+		__hugetlb_zap_end(vma, details);
+}
+
 void hugetlb_vma_lock_read(struct vm_area_struct *vma);
 void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
 void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -296,6 +316,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
 {
 }
 
+static inline void hugetlb_zap_begin(
+				struct vm_area_struct *vma,
+				unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+				struct vm_area_struct *vma,
+				struct zap_details *details)
+{
+}
+
 static inline struct page *hugetlb_follow_page_mask(
 	struct vm_area_struct *vma, unsigned long address, unsigned int flags,
 	unsigned int *page_mask)
@@ -441,7 +473,7 @@ static inline long hugetlb_change_protection(
 	return 0;
 }
 
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 			struct vm_area_struct *vma, unsigned long start,
 			unsigned long end, struct page *ref_page,
 			zap_flags_t zap_flags)
@@ -1233,6 +1265,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
 	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
 }
 
+static inline bool __vma_private_lock(struct vm_area_struct *vma)
+{
+	return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+}
+
 /*
  * Safe version of huge_pte_offset() to check the locks. See comments
 * above huge_pte_offset().
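
The new hugetlb_zap_begin()/hugetlb_zap_end() pair follows the usual header pattern: a cheap static inline wrapper that checks is_vm_hugetlb_page() before calling the real __hugetlb_zap_*() helper, plus empty stubs in the other branch so callers need no #ifdefs. A generic, self-contained sketch of that pattern (the feature_zap_* names are invented for illustration, not kernel API):

#include <stdio.h>

#define HAVE_FEATURE 1  /* flip to 0 to compile the no-op stub instead */

#if HAVE_FEATURE
static void __feature_zap_begin(int *start, int *end)
{
        *start -= 1;    /* stand-in for the real range adjustment */
        *end += 1;
}

static inline void feature_zap_begin(int applies, int *start, int *end)
{
        if (applies)    /* cheap check before doing the real work */
                __feature_zap_begin(start, end);
}
#else
static inline void feature_zap_begin(int applies, int *start, int *end) { }
#endif

int main(void)
{
        int start = 10, end = 20;

        feature_zap_begin(1, &start, &end);
        printf("range now [%d, %d]\n", start, end);
        return 0;
}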

include/linux/kasan.h

+3-3
@@ -466,10 +466,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
 
 #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
 
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
 #endif /* LINUX_KASAN_H */

lib/maple_tree.c

+1-1
@@ -5627,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 	/* Internal nodes */
 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
 	/* Add working room for split (2 nodes) + new parents */
-	mas_node_count(mas, nr_nodes + 3);
+	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
 
 	/* Detect if allocations run out */
 	mas->mas_flags |= MA_STATE_PREALLOC;
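
Since GFP_KERNEL allocations may sleep, the practical consequence is that mas_expected_entries() wants a sleepable context and a checked return value; the test changes below, which swap the tree's internal lock for an external rw_semaphore, are consistent with that. A minimal hedged sketch of a caller (mas and nr_entries are assumed to exist, as in the tests):

        if (mas_expected_entries(&mas, nr_entries))
                return -ENOMEM; /* preallocation failed even with GFP_KERNEL */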

lib/test_maple_tree.c

+24-11
@@ -9,6 +9,7 @@
 
 #include <linux/maple_tree.h>
 #include <linux/module.h>
+#include <linux/rwsem.h>
 
 #define MTREE_ALLOC_MAX 0x2000000000000Ul
 #define CONFIG_MAPLE_SEARCH
@@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
 	void *val;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, mt, 0, 0);
+	struct rw_semaphore newmt_lock;
+
+	init_rwsem(&newmt_lock);
 
 	for (i = 0; i <= nr_entries; i++)
 		mtree_store_range(mt, i*10, i*10 + 5,
 				  xa_mk_value(i), GFP_KERNEL);
 
 	mt_set_non_kernel(99999);
-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&newmt, &newmt_lock);
 	newmas.tree = &newmt;
 	mas_reset(&newmas);
 	mas_reset(&mas);
-	mas_lock(&newmas);
+	down_write(&newmt_lock);
 	mas.index = 0;
 	mas.last = 0;
 	if (mas_expected_entries(&newmas, nr_entries)) {
@@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
 	}
 	rcu_read_unlock();
 	mas_destroy(&newmas);
-	mas_unlock(&newmas);
 	mt_validate(&newmt);
 	mt_set_non_kernel(0);
-	mtree_destroy(&newmt);
+	__mt_destroy(&newmt);
+	up_write(&newmt_lock);
 }
 
 static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
 	void *val;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, mt, 0, 0);
+	struct rw_semaphore newmt_lock;
+
+	init_rwsem(&newmt_lock);
+	mt_set_external_lock(&newmt, &newmt_lock);
 
 	for (i = 0; i <= nr_entries; i++)
 		mtree_store_range(mt, i*10, i*10 + 5,
@@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
 	mas.index = 0;
 	mas.last = 0;
 	rcu_read_lock();
-	mas_lock(&newmas);
+	down_write(&newmt_lock);
 	if (mas_expected_entries(&newmas, nr_entries)) {
 		printk("OOM!");
 		BUG_ON(1);
@@ -2005,11 +2014,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
 			mas_store(&newmas, val);
 		}
 		mas_destroy(&newmas);
-		mas_unlock(&newmas);
 		rcu_read_unlock();
 		mt_validate(&newmt);
 		mt_set_non_kernel(0);
-		mtree_destroy(&newmt);
+		__mt_destroy(&newmt);
+		up_write(&newmt_lock);
 	}
 }
 #endif
@@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
 	void *tmp;
 	MA_STATE(mas, mt, 0, 0);
 	MA_STATE(newmas, &newmt, 0, 0);
+	struct rw_semaphore newmt_lock;
+
+	init_rwsem(&newmt_lock);
+	mt_set_external_lock(&newmt, &newmt_lock);
 
 	if (!zero_start)
 		i = 1;
@@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
 		mtree_store_range(mt, i*10, (i+1)*10 - gap,
 				  xa_mk_value(i), GFP_KERNEL);
 
-	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+	mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
 	mt_set_non_kernel(99999);
-	mas_lock(&newmas);
+	down_write(&newmt_lock);
 	ret = mas_expected_entries(&newmas, nr_entries);
 	mt_set_non_kernel(0);
 	MT_BUG_ON(mt, ret != 0);
@@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
 	}
 	rcu_read_unlock();
 	mas_destroy(&newmas);
-	mas_unlock(&newmas);
 
-	mtree_destroy(&newmt);
+	__mt_destroy(&newmt);
+	up_write(&newmt_lock);
 }
 
 /* Duplicate many sizes of trees. Mainly to test expected entry values */
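
Condensed, the pattern the three updated tests now share looks like this. Every call below appears in the hunks above; this is a sketch, not a complete test, and newmt, newmas and nr_entries are the tests' own variables:

        struct rw_semaphore newmt_lock;

        init_rwsem(&newmt_lock);
        mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
        mt_set_external_lock(&newmt, &newmt_lock);

        down_write(&newmt_lock);
        if (mas_expected_entries(&newmas, nr_entries)) {
                /* preallocation failed */
        }
        /* ... copy entries into the new tree with mas_store(&newmas, val) ... */
        mas_destroy(&newmas);
        __mt_destroy(&newmt);   /* caller-holds-the-lock variant, not mtree_destroy() */
        up_write(&newmt_lock);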

mm/damon/sysfs.c

+5-2
@@ -1208,6 +1208,8 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
 	return 0;
 }
 
+static bool damon_sysfs_schemes_regions_updating;
+
 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
 {
 	struct damon_target *t, *next;
@@ -1219,8 +1221,10 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
 	cmd = damon_sysfs_cmd_request.cmd;
 	if (kdamond && ctx == kdamond->damon_ctx &&
 			(cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS ||
-			cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES)) {
+			cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) &&
+			damon_sysfs_schemes_regions_updating) {
 		damon_sysfs_schemes_update_regions_stop(ctx);
+		damon_sysfs_schemes_regions_updating = false;
 		mutex_unlock(&damon_sysfs_lock);
 	}
 
@@ -1340,7 +1344,6 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
 static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
 {
 	struct damon_sysfs_kdamond *kdamond;
-	static bool damon_sysfs_schemes_regions_updating;
 	bool total_bytes_only = false;
 	int err = 0;
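
The functional change here is that the regions-updating flag moves from a function-local static in damon_sysfs_cmd_request_callback() to file scope, so before_terminate() can consult it and only stop an update that actually started. A standalone illustration of why the scope matters (function and variable names invented):

#include <stdio.h>
#include <stdbool.h>

static bool regions_updating;   /* file scope: both functions can see it */

static void cmd_request_callback(void)
{
        /* With a function-local static here, no other function could read it. */
        regions_updating = true;
}

static void before_terminate(void)
{
        if (regions_updating) { /* only stop an update that actually started */
                printf("stopping regions update\n");
                regions_updating = false;
        } else {
                printf("no update in progress, nothing to stop\n");
        }
}

int main(void)
{
        before_terminate();     /* prints: no update in progress, nothing to stop */
        cmd_request_callback();
        before_terminate();     /* prints: stopping regions update */
        return 0;
}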
