mm: rename unlock_page_lruvec_irq and its variants
Using the folio_lruvec_lock() variants in conjunction with the
unlock_page_lruvec() variants is inconsistent: the caller nominally locks a
folio but unlocks a page, even though both sides operate on the same lruvec.
To rectify this, rename unlock_page_lruvec{_irq,_irqrestore} to
lruvec_unlock{_irq,_irqrestore}.
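For illustration, here is how a typical call site pairs up before and after
the rename (a minimal sketch based on the folio_isolate_lru() hunk below):

	/* Before: the lock helper names the folio, the unlock helper a page. */
	lruvec = folio_lruvec_lock_irq(folio);
	lruvec_del_folio(lruvec, folio);
	unlock_page_lruvec_irq(lruvec);

	/* After: both helpers name the lruvec that is actually locked. */
	lruvec = folio_lruvec_lock_irq(folio);
	lruvec_del_folio(lruvec, folio);
	lruvec_unlock_irq(lruvec);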
Link: https://lore.kernel.org/4e5e05271a250df4d1812e1832be65636a78c957.1772711148.git.zhengqi.arch@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Chen Ridong <chenridong@huawei.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Allen Pais <apais@linux.microsoft.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Hamza Mahfooz <hamzamahfooz@linux.microsoft.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Imran Khan <imran.f.khan@oracle.com>
Cc: Kamalesh Babulal <kamalesh.babulal@oracle.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Cc: Wei Xu <weixugc@google.com>
Cc: Yosry Ahmed <yosry@kernel.org>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2b33c342f7
commit db128b2c6b
include/linux/memcontrol.h

@@ -1479,17 +1479,17 @@ static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
 	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
 }
 
-static inline void unlock_page_lruvec(struct lruvec *lruvec)
+static inline void lruvec_unlock(struct lruvec *lruvec)
 {
 	spin_unlock(&lruvec->lru_lock);
 }
 
-static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
+static inline void lruvec_unlock_irq(struct lruvec *lruvec)
 {
 	spin_unlock_irq(&lruvec->lru_lock);
 }
 
-static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
+static inline void lruvec_unlock_irqrestore(struct lruvec *lruvec,
 		unsigned long flags)
 {
 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
@@ -1511,7 +1511,7 @@ static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
 		if (folio_matches_lruvec(folio, locked_lruvec))
 			return locked_lruvec;
 
-		unlock_page_lruvec_irq(locked_lruvec);
+		lruvec_unlock_irq(locked_lruvec);
 	}
 
 	return folio_lruvec_lock_irq(folio);
@@ -1525,7 +1525,7 @@ static inline void folio_lruvec_relock_irqsave(struct folio *folio,
 		if (folio_matches_lruvec(folio, *lruvecp))
 			return;
 
-		unlock_page_lruvec_irqrestore(*lruvecp, *flags);
+		lruvec_unlock_irqrestore(*lruvecp, *flags);
 	}
 
 	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
mm/compaction.c

@@ -913,7 +913,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 */
 		if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
 			if (locked) {
-				unlock_page_lruvec_irqrestore(locked, flags);
+				lruvec_unlock_irqrestore(locked, flags);
 				locked = NULL;
 			}
 
@@ -964,7 +964,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			}
 			/* for alloc_contig case */
 			if (locked) {
-				unlock_page_lruvec_irqrestore(locked, flags);
+				lruvec_unlock_irqrestore(locked, flags);
 				locked = NULL;
 			}
 
@@ -1053,7 +1053,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (unlikely(page_has_movable_ops(page)) &&
 		    !PageMovableOpsIsolated(page)) {
 			if (locked) {
-				unlock_page_lruvec_irqrestore(locked, flags);
+				lruvec_unlock_irqrestore(locked, flags);
 				locked = NULL;
 			}
 
@@ -1158,7 +1158,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		/* If we already hold the lock, we can skip some rechecking */
 		if (lruvec != locked) {
 			if (locked)
-				unlock_page_lruvec_irqrestore(locked, flags);
+				lruvec_unlock_irqrestore(locked, flags);
 
 			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
 			locked = lruvec;
@@ -1226,7 +1226,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 isolate_fail_put:
 		/* Avoid potential deadlock in freeing page under lru_lock */
 		if (locked) {
-			unlock_page_lruvec_irqrestore(locked, flags);
+			lruvec_unlock_irqrestore(locked, flags);
 			locked = NULL;
 		}
 		folio_put(folio);
@@ -1242,7 +1242,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	 */
 	if (nr_isolated) {
 		if (locked) {
-			unlock_page_lruvec_irqrestore(locked, flags);
+			lruvec_unlock_irqrestore(locked, flags);
 			locked = NULL;
 		}
 		putback_movable_pages(&cc->migratepages);
@@ -1274,7 +1274,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 isolate_abort:
 	if (locked)
-		unlock_page_lruvec_irqrestore(locked, flags);
+		lruvec_unlock_irqrestore(locked, flags);
 	if (folio) {
 		folio_set_lru(folio);
 		folio_put(folio);
mm/huge_memory.c

@@ -3994,7 +3994,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
 	folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
 
 	if (do_lru)
-		unlock_page_lruvec(lruvec);
+		lruvec_unlock(lruvec);
 
 	if (ci)
 		swap_cluster_unlock(ci);
mm/mlock.c

@@ -205,7 +205,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch)
 	}
 
 	if (lruvec)
-		unlock_page_lruvec_irq(lruvec);
+		lruvec_unlock_irq(lruvec);
 	folios_put(fbatch);
 }
 
mm/swap.c

@@ -91,7 +91,7 @@ static void page_cache_release(struct folio *folio)
 
 	__page_cache_release(folio, &lruvec, &flags);
 	if (lruvec)
-		unlock_page_lruvec_irqrestore(lruvec, flags);
+		lruvec_unlock_irqrestore(lruvec, flags);
 }
 
 void __folio_put(struct folio *folio)
@@ -175,7 +175,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	}
 
 	if (lruvec)
-		unlock_page_lruvec_irqrestore(lruvec, flags);
+		lruvec_unlock_irqrestore(lruvec, flags);
 	folios_put(fbatch);
 }
 
@@ -349,7 +349,7 @@ void folio_activate(struct folio *folio)
 
 	lruvec = folio_lruvec_lock_irq(folio);
 	lru_activate(lruvec, folio);
-	unlock_page_lruvec_irq(lruvec);
+	lruvec_unlock_irq(lruvec);
 	folio_set_lru(folio);
 }
 #endif
@@ -963,7 +963,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 
 		if (folio_is_zone_device(folio)) {
 			if (lruvec) {
-				unlock_page_lruvec_irqrestore(lruvec, flags);
+				lruvec_unlock_irqrestore(lruvec, flags);
 				lruvec = NULL;
 			}
 			if (folio_ref_sub_and_test(folio, nr_refs))
@@ -977,7 +977,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 		/* hugetlb has its own memcg */
 		if (folio_test_hugetlb(folio)) {
 			if (lruvec) {
-				unlock_page_lruvec_irqrestore(lruvec, flags);
+				lruvec_unlock_irqrestore(lruvec, flags);
 				lruvec = NULL;
 			}
 			free_huge_folio(folio);
@@ -991,7 +991,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 		j++;
 	}
 	if (lruvec)
-		unlock_page_lruvec_irqrestore(lruvec, flags);
+		lruvec_unlock_irqrestore(lruvec, flags);
 	if (!j) {
 		folio_batch_reinit(folios);
 		return;
mm/vmscan.c

@@ -1831,7 +1831,7 @@ bool folio_isolate_lru(struct folio *folio)
 		folio_get(folio);
 		lruvec = folio_lruvec_lock_irq(folio);
 		lruvec_del_folio(lruvec, folio);
-		unlock_page_lruvec_irq(lruvec);
+		lruvec_unlock_irq(lruvec);
 		ret = true;
 	}
 
@@ -7898,7 +7898,7 @@ void check_move_unevictable_folios(struct folio_batch *fbatch)
 	if (lruvec) {
 		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
 		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
-		unlock_page_lruvec_irq(lruvec);
+		lruvec_unlock_irq(lruvec);
 	} else if (pgscanned) {
 		count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
 	}