author		Jeff Layton <jlayton@kernel.org>	2022-06-10 17:40:13 +0200
committer	Ilya Dryomov <idryomov@gmail.com>	2022-08-03 00:54:12 +0200
commit		020bc44a9fbf6946f42db503d11c9811f26dd9fd (patch)
tree		5a373e7dad5f73487bb9ce7cd6b71aaada51470f
parent		ceph: call netfs_subreq_terminated with was_async == false (diff)
ceph: switch back to testing for NULL folio->private in ceph_dirty_folio
Willy requested that we change this back to warning on folio->private
being non-NULL. He's trying to kill off the PG_private flag, and so we'd
like to catch where it's non-NULL.
Add a VM_WARN_ON_FOLIO macro (since one doesn't exist yet) and change over
to using that instead of VM_BUG_ON_FOLIO, testing the ->private pointer
directly.
[ xiubli: define the VM_WARN_ON_FOLIO macro in case DEBUG_VM is disabled,
  reported by kernel test robot <lkp@intel.com> ]
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
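
For readers less familiar with the mm debug macros, here is a minimal userspace sketch (not kernel code; SKETCH_BUG_ON and SKETCH_WARN_ON are hypothetical stand-ins for VM_BUG_ON_FOLIO and VM_WARN_ON_FOLIO) of the behavioural difference this change relies on: a failed BUG-style check halts, while a failed WARN-style check only logs and lets the caller continue, so stray non-NULL folio->private values can be reported without taking the machine down.

/*
 * Userspace sketch only: fprintf()/abort() stand in for the kernel's
 * dump_page()/WARN_ON()/BUG_ON() machinery.
 */
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_BUG_ON(cond) do {					\
	if (cond) {							\
		fprintf(stderr, "BUG: %s\n", #cond);			\
		abort();	/* the kernel would oops here */	\
	}								\
} while (0)

#define SKETCH_WARN_ON(cond) ({						\
	int __ret_warn = !!(cond);					\
	if (__ret_warn)							\
		fprintf(stderr, "WARNING: %s\n", #cond);		\
	__ret_warn;		/* caller keeps running */		\
})

int main(void)
{
	void *folio_private = (void *)0x1;	/* pretend ->private was left set */

	SKETCH_WARN_ON(folio_private != NULL);	/* logs and continues */
	printf("still running after the warning\n");

	SKETCH_BUG_ON(folio_private != NULL);	/* aborts the program here */
	return 0;
}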
-rw-r--r--	fs/ceph/addr.c	2
-rw-r--r--	include/linux/mmdebug.h	10
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 01fe75b8d146..31fc04eeb4d0 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -122,7 +122,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
 	 * Reference snap context in folio->private. Also set
 	 * PagePrivate so that we get invalidate_folio callback.
 	 */
-	VM_BUG_ON_FOLIO(folio_test_private(folio), folio);
+	VM_WARN_ON_FOLIO(folio->private, folio);
 	folio_attach_private(folio, snapc);
 
 	return ceph_fscache_dirty_folio(mapping, folio);
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index d7285f8148a3..15ae78cd2853 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -54,6 +54,15 @@ void dump_mm(const struct mm_struct *mm);
 	}								\
 	unlikely(__ret_warn_once);					\
 })
+#define VM_WARN_ON_FOLIO(cond, folio)		({			\
+	int __ret_warn = !!(cond);					\
+									\
+	if (unlikely(__ret_warn)) {					\
+		dump_page(&folio->page, "VM_WARN_ON_FOLIO(" __stringify(cond)")");\
+		WARN_ON(1);						\
+	}								\
+	unlikely(__ret_warn);						\
+})
 #define VM_WARN_ON_ONCE_FOLIO(cond, folio)	({			\
 	static bool __section(".data.once") __warned;			\
 	int __ret_warn_once = !!(cond);					\
@@ -79,6 +88,7 @@ void dump_mm(const struct mm_struct *mm);
 #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
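
The !DEBUG_VM fallback maps the new macro to BUILD_BUG_ON_INVALID(cond), whose point is to keep the condition visible to the compiler without generating any runtime code. A rough userspace sketch of that idea, with a hypothetical CHECKED_NOOP() in place of BUILD_BUG_ON_INVALID():

#include <stdio.h>

/*
 * sizeof() only needs the type of its operand, so the expression is
 * parsed and type-checked but never evaluated at run time.
 */
#define CHECKED_NOOP(cond)	((void)sizeof(!!(cond)))

int main(void)
{
	void *private = (void *)0x1;

	CHECKED_NOOP(private != NULL);	/* compiles, emits no check */
	printf("no runtime check was emitted\n");
	return 0;
}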