Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r-- | include/linux/pagemap.h | 78
1 file changed, 63 insertions, 15 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 6165283bdb6f..ce96866fbec4 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -492,7 +492,7 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
 	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
 }
 
-typedef int filler_t(void *, struct page *);
+typedef int filler_t(struct file *, struct folio *);
 
 pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index,
 		unsigned long max_scan);
@@ -735,7 +735,7 @@ static inline unsigned find_get_pages_tag(struct address_space *mapping,
 }
 
 struct page *grab_cache_page_write_begin(struct address_space *mapping,
-			pgoff_t index, unsigned flags);
+			pgoff_t index);
 
 /*
  * Returns locked page at given index in given cache, creating it if needed.
@@ -747,9 +747,9 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
 }
 
 struct folio *read_cache_folio(struct address_space *, pgoff_t index,
-		filler_t *filler, void *data);
+		filler_t *filler, struct file *file);
 struct page *read_cache_page(struct address_space *, pgoff_t index,
-		filler_t *filler, void *data);
+		filler_t *filler, struct file *file);
 extern struct page * read_cache_page_gfp(struct address_space *mapping,
 		pgoff_t index, gfp_t gfp_mask);
 
@@ -888,6 +888,18 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
 void unlock_page(struct page *page);
 void folio_unlock(struct folio *folio);
 
+/**
+ * folio_trylock() - Attempt to lock a folio.
+ * @folio: The folio to attempt to lock.
+ *
+ * Sometimes it is undesirable to wait for a folio to be unlocked (eg
+ * when the locks are being taken in the wrong order, or if making
+ * progress through a batch of folios is more important than processing
+ * them in order). Usually folio_lock() is the correct function to call.
+ *
+ * Context: Any context.
+ * Return: Whether the lock was successfully acquired.
+ */
 static inline bool folio_trylock(struct folio *folio)
 {
 	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
@@ -901,6 +913,28 @@ static inline int trylock_page(struct page *page)
 	return folio_trylock(page_folio(page));
 }
 
+/**
+ * folio_lock() - Lock this folio.
+ * @folio: The folio to lock.
+ *
+ * The folio lock protects against many things, probably more than it
+ * should. It is primarily held while a folio is being brought uptodate,
+ * either from its backing file or from swap. It is also held while a
+ * folio is being truncated from its address_space, so holding the lock
+ * is sufficient to keep folio->mapping stable.
+ *
+ * The folio lock is also held while write() is modifying the page to
+ * provide POSIX atomicity guarantees (as long as the write does not
+ * cross a page boundary). Other modifications to the data in the folio
+ * do not hold the folio lock and can race with writes, eg DMA and stores
+ * to mapped pages.
+ *
+ * Context: May sleep. If you need to acquire the locks of two or
+ * more folios, they must be in order of ascending index, if they are
+ * in the same address_space. If they are in different address_spaces,
+ * acquire the lock of the folio which belongs to the address_space which
+ * has the lowest address in memory first.
+ */
 static inline void folio_lock(struct folio *folio)
 {
 	might_sleep();
@@ -908,8 +942,16 @@ static inline void folio_lock(struct folio *folio)
 		__folio_lock(folio);
 }
 
-/*
- * lock_page may only be called if we have the page's inode pinned.
+/**
+ * lock_page() - Lock the folio containing this page.
+ * @page: The page to lock.
+ *
+ * See folio_lock() for a description of what the lock protects.
+ * This is a legacy function and new code should probably use folio_lock()
+ * instead.
+ *
+ * Context: May sleep. Pages in the same folio share a lock, so do not
+ * attempt to lock two pages which share a folio.
  */
 static inline void lock_page(struct page *page)
 {
@@ -921,6 +963,16 @@ static inline void lock_page(struct page *page)
 		__folio_lock(folio);
 }
 
+/**
+ * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
+ * @folio: The folio to lock.
+ *
+ * Attempts to lock the folio, like folio_lock(), except that the sleep
+ * to acquire the lock is interruptible by a fatal signal.
+ *
+ * Context: May sleep; see folio_lock().
+ * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
+ */
 static inline int folio_lock_killable(struct folio *folio)
 {
 	might_sleep();
@@ -967,8 +1019,8 @@ int folio_wait_bit_killable(struct folio *folio, int bit_nr);
  * Wait for a folio to be unlocked.
  *
  * This must be called with the caller "holding" the folio,
- * ie with increased "page->count" so that the folio won't
- * go away during the wait..
+ * ie with increased folio reference count so that the folio won't
+ * go away during the wait.
  */
 static inline void folio_wait_locked(struct folio *folio)
 {
@@ -1015,10 +1067,6 @@ static inline void folio_cancel_dirty(struct folio *folio)
 	if (folio_test_dirty(folio))
 		__folio_cancel_dirty(folio);
 }
-static inline void cancel_dirty_page(struct page *page)
-{
-	folio_cancel_dirty(page_folio(page));
-}
 bool folio_clear_dirty_for_io(struct folio *folio);
 bool clear_page_dirty_for_io(struct page *page);
 void folio_invalidate(struct folio *folio, size_t offset, size_t length);
@@ -1191,7 +1239,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
  * @mapping: address_space which holds the pagecache and I/O vectors
  * @ra: file_ra_state which holds the readahead state
  * @file: Used by the filesystem for authentication.
- * @page: The page at @index which triggered the readahead call.
+ * @folio: The folio at @index which triggered the readahead call.
  * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
@@ -1203,10 +1251,10 @@ void page_cache_sync_readahead(struct address_space *mapping,
  */
 static inline void page_cache_async_readahead(struct address_space *mapping,
 		struct file_ra_state *ra, struct file *file,
-		struct page *page, pgoff_t index, unsigned long req_count)
+		struct folio *folio, pgoff_t index, unsigned long req_count)
 {
 	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
-	page_cache_async_ra(&ractl, page_folio(page), req_count);
+	page_cache_async_ra(&ractl, folio, req_count);
 }
 
 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
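
The filler_t change above replaces the opaque void * cookie with the struct file * that read_cache_folio() and read_cache_page() now forward to the filler. Below is a minimal sketch of an updated caller; my_read_folio() and my_get_page() are hypothetical names, and the fill is assumed to be synchronous (real fillers usually start I/O and unlock the folio from the completion path).

/* Hypothetical filler matching the new typedef:
 * typedef int filler_t(struct file *, struct folio *);
 */
static int my_read_folio(struct file *file, struct folio *folio)
{
	/* ... copy backing data into the folio here ... */
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

static struct page *my_get_page(struct address_space *mapping, pgoff_t index,
				struct file *file)
{
	/* Before this change: read_cache_page(mapping, index, filler, data) */
	return read_cache_page(mapping, index, my_read_folio, file);
}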
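The new folio_trylock() kerneldoc calls out batch processing as a reason not to sleep on a contended lock. A sketch of that pattern, assuming the folio_batch API and a hypothetical process_one() helper:

/* Skip contended folios instead of sleeping, so one locked folio
 * does not stall progress through the whole batch.
 */
static void process_batch(struct folio_batch *fbatch)
{
	unsigned int i;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		if (!folio_trylock(folio))
			continue;	/* contended; revisit later */
		process_one(folio);
		folio_unlock(folio);
	}
}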
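folio_lock_killable() documents a 0 or -EINTR return, so callers are expected to back out when a fatal signal arrives. A minimal sketch, with do_something_locked() as a hypothetical helper:

static int lock_and_do(struct folio *folio)
{
	int err = folio_lock_killable(folio);

	if (err)
		return err;	/* -EINTR: fatal signal received */
	do_something_locked(folio);
	folio_unlock(folio);
	return 0;
}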
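page_cache_async_readahead() now takes the folio which triggered the readahead rather than its page. A hypothetical read-path fragment under the new signature:

/* If this folio carries the readahead marker, the window is being
 * consumed, so ask for more pages.
 */
static void maybe_async_readahead(struct address_space *mapping,
				  struct file_ra_state *ra, struct file *file,
				  struct folio *folio, pgoff_t index,
				  unsigned long req_count)
{
	if (folio_test_readahead(folio))
		page_cache_async_readahead(mapping, ra, file, folio,
					   index, req_count);
}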