7 hotfixes. 6 are cc:stable and all are for MM. Please see the
individual changelogs for details.
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaeSA8wAKCRDdBJ7gKXxA
 jlKAAP4z9SQH3J71BVZK0pLi/9a/ISEZyJ9zIpsNi1sQ3shO/gD7BoUTQkepX5dk
 dv1YBhroYz981dgIIV3kXpIAEYnoeQc=
 =Os+z
 -----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2026-04-19-00-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM fixes from Andrew Morton:
 "7 hotfixes. 6 are cc:stable and all are for MM. Please see the
  individual changelogs for details"

* tag 'mm-hotfixes-stable-2026-04-19-00-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/damon/core: disallow non-power of two min_region_sz on damon_start()
  mm/vmalloc: take vmap_purge_lock in shrinker
  mm: call ->free_folio() directly in folio_unmap_invalidate()
  mm: blk-cgroup: fix use-after-free in cgwb_release_workfn()
  mm/zone_device: do not touch device folio after calling ->folio_free()
  mm/damon/core: disallow time-quota setting zero esz
  mm/mempolicy: fix weighted interleave auto sysfs name
This commit is contained in:
Linus Torvalds 2026-04-19 14:45:37 -07:00
commit c1f49dea2b
8 changed files with 27 additions and 12 deletions

View File

@@ -618,12 +618,13 @@ static void cgwb_release_workfn(struct work_struct *work)
wb_shutdown(wb);
css_put(wb->memcg_css);
css_put(wb->blkcg_css);
mutex_unlock(&wb->bdi->cgwb_release_mutex);
/* triggers blkg destruction if no online users left */
blkcg_unpin_online(wb->blkcg_css);
css_put(wb->blkcg_css);
mutex_unlock(&wb->bdi->cgwb_release_mutex);
fprop_local_destroy_percpu(&wb->memcg_completions);
spin_lock_irq(&cgwb_lock);

View File

@@ -1477,6 +1477,11 @@ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
int i;
int err = 0;
for (i = 0; i < nr_ctxs; i++) {
if (!is_power_of_2(ctxs[i]->min_region_sz))
return -EINVAL;
}
mutex_lock(&damon_lock);
if ((exclusive && nr_running_ctxs) ||
(!exclusive && running_exclusive_ctxs)) {
@@ -2384,7 +2389,8 @@ static void damos_goal_tune_esz_bp_temporal(struct damos_quota *quota)
/*
* Called only if quota->ms, or quota->sz are set, or quota->goals is not empty
*/
static void damos_set_effective_quota(struct damos_quota *quota)
static void damos_set_effective_quota(struct damos_quota *quota,
struct damon_ctx *ctx)
{
unsigned long throughput;
unsigned long esz = ULONG_MAX;
@@ -2409,6 +2415,7 @@ static void damos_set_effective_quota(struct damos_quota *quota)
else
throughput = PAGE_SIZE * 1024;
esz = min(throughput * quota->ms, esz);
esz = max(ctx->min_region_sz, esz);
}
if (quota->sz && quota->sz < esz)
@@ -2445,7 +2452,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
/* First charge window */
if (!quota->total_charged_sz && !quota->charged_from) {
quota->charged_from = jiffies;
damos_set_effective_quota(quota);
damos_set_effective_quota(quota, c);
}
/* New charge window starts */
@@ -2460,7 +2467,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
quota->charged_sz = 0;
if (trace_damos_esz_enabled())
cached_esz = quota->esz;
damos_set_effective_quota(quota);
damos_set_effective_quota(quota, c);
if (trace_damos_esz_enabled() && quota->esz != cached_esz)
damos_trace_esz(c, s, quota);
}

View File

@@ -228,7 +228,8 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
page_cache_delete(mapping, folio, shadow);
}
void filemap_free_folio(struct address_space *mapping, struct folio *folio)
static void filemap_free_folio(const struct address_space *mapping,
struct folio *folio)
{
void (*free_folio)(struct folio *);

View File

@@ -557,7 +557,6 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
loff_t end);

View File

@@ -3788,9 +3788,11 @@ static void wi_state_free(void)
}
}
static struct kobj_attribute wi_auto_attr =
__ATTR(auto, 0664, weighted_interleave_auto_show,
weighted_interleave_auto_store);
static struct kobj_attribute wi_auto_attr = {
.attr = { .name = "auto", .mode = 0664 },
.show = weighted_interleave_auto_show,
.store = weighted_interleave_auto_store,
};
static void wi_cleanup(void) {
sysfs_remove_file(&wi_group->wi_kobj, &wi_auto_attr.attr);

View File

@@ -454,7 +454,7 @@ void free_zone_device_folio(struct folio *folio)
if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->folio_free))
break;
pgmap->ops->folio_free(folio);
percpu_ref_put_many(&folio->pgmap->ref, nr);
percpu_ref_put_many(&pgmap->ref, nr);
break;
case MEMORY_DEVICE_GENERIC:

View File

@@ -622,6 +622,7 @@ static int folio_launder(struct address_space *mapping, struct folio *folio)
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
gfp_t gfp)
{
void (*free_folio)(struct folio *);
int ret;
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -648,9 +649,12 @@ int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping))
inode_lru_list_add(mapping->host);
free_folio = mapping->a_ops->free_folio;
spin_unlock(&mapping->host->i_lock);
filemap_free_folio(mapping, folio);
if (free_folio)
free_folio(folio);
folio_put_refs(folio, folio_nr_pages(folio));
return 1;
failed:
xa_unlock_irq(&mapping->i_pages);

View File

@@ -5416,6 +5416,7 @@ vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct vmap_node *vn;
guard(mutex)(&vmap_purge_lock);
for_each_vmap_node(vn)
decay_va_pool_node(vn, true);