Diffstat (limited to 'kernel/events')
-rw-r--r--   kernel/events/core.c    | 12 +++++++++---
-rw-r--r--   kernel/events/uprobes.c | 28 +++++++++++++++++-----------
2 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c1c1cdf0b811..1031bdf9f012 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4927,9 +4927,9 @@ unlock:
 	rcu_read_unlock();
 }
 
-static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int perf_mmap_fault(struct vm_fault *vmf)
 {
-	struct perf_event *event = vma->vm_file->private_data;
+	struct perf_event *event = vmf->vma->vm_file->private_data;
 	struct ring_buffer *rb;
 	int ret = VM_FAULT_SIGBUS;
 
@@ -4952,7 +4952,7 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto unlock;
 
 	get_page(vmf->page);
-	vmf->page->mapping = vma->vm_file->f_mapping;
+	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
 	vmf->page->index   = vmf->pgoff;
 
 	ret = 0;
@@ -10955,5 +10955,11 @@ struct cgroup_subsys perf_event_cgrp_subsys = {
 	.css_alloc	= perf_cgroup_css_alloc,
 	.css_free	= perf_cgroup_css_free,
 	.attach		= perf_cgroup_attach,
+	/*
+	 * Implicitly enable on dfl hierarchy so that perf events can
+	 * always be filtered by cgroup2 path as long as perf_event
+	 * controller is not mounted on a legacy hierarchy.
+	 */
+	.implicit_on_dfl = true,
 };
 #endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d416f3baf392..d630f8ac4d2f 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -153,14 +153,19 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 				struct page *old_page, struct page *new_page)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	spinlock_t *ptl;
-	pte_t *ptep;
+	struct page_vma_mapped_walk pvmw = {
+		.page = old_page,
+		.vma = vma,
+		.address = addr,
+	};
 	int err;
 	/* For mmu_notifiers */
 	const unsigned long mmun_start = addr;
 	const unsigned long mmun_end   = addr + PAGE_SIZE;
 	struct mem_cgroup *memcg;
 
+	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
+
 	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
 			false);
 	if (err)
@@ -171,11 +176,11 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
 	err = -EAGAIN;
-	ptep = page_check_address(old_page, mm, addr, &ptl, 0);
-	if (!ptep) {
+	if (!page_vma_mapped_walk(&pvmw)) {
 		mem_cgroup_cancel_charge(new_page, memcg, false);
 		goto unlock;
 	}
+	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
 	get_page(new_page);
 	page_add_new_anon_rmap(new_page, vma, addr, false);
@@ -187,14 +192,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 		inc_mm_counter(mm, MM_ANONPAGES);
 	}
 
-	flush_cache_page(vma, addr, pte_pfn(*ptep));
-	ptep_clear_flush_notify(vma, addr, ptep);
-	set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
+	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
+	ptep_clear_flush_notify(vma, addr, pvmw.pte);
+	set_pte_at_notify(mm, addr, pvmw.pte,
+			mk_pte(new_page, vma->vm_page_prot));
 
 	page_remove_rmap(old_page, false);
 	if (!page_mapped(old_page))
 		try_to_free_swap(old_page);
-	pte_unmap_unlock(ptep, ptl);
+	page_vma_mapped_walk_done(&pvmw);
 
 	if (vma->vm_flags & VM_LOCKED)
 		munlock_vma_page(old_page);
@@ -300,8 +306,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
 
 retry:
 	/* Read the page with vaddr into memory */
-	ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
-			&vma, NULL);
+	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
+			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
 	if (ret <= 0)
 		return ret;
 
@@ -741,7 +747,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 			continue;
 		}
 
-		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
+		if (!mmget_not_zero(vma->vm_mm))
 			continue;
 
 		info = prev;
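
Context for the core.c hunks (this note is not part of the commit): perf_mmap_fault() is adjusted for the tree-wide change that dropped the vma argument from vm_operations_struct ->fault handlers; the VMA now travels inside struct vm_fault as vmf->vma. A minimal sketch of a handler in that new style follows, under the same-era assumption; my_fault, my_object and my_vm_ops are hypothetical names, and only the signature and the vmf->vma accesses mirror what the diff does:

/*
 * Sketch only: a single-page ->fault handler in the post-change style.
 * The VMA is no longer a separate argument; it is read from vmf->vma,
 * as perf_mmap_fault() does above. my_object is a hypothetical type.
 */
#include <linux/fs.h>
#include <linux/mm.h>

struct my_object {
	struct page *page;	/* backing page for a one-page mapping */
};

static int my_fault(struct vm_fault *vmf)
{
	struct my_object *obj = vmf->vma->vm_file->private_data;

	if (vmf->pgoff != 0 || !obj->page)
		return VM_FAULT_SIGBUS;

	get_page(obj->page);	/* reference handed to the fault core */
	vmf->page = obj->page;
	return 0;
}

static const struct vm_operations_struct my_vm_ops = {
	.fault = my_fault,
};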
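
The uprobes hunks convert __replace_page() from page_check_address() to the page_vma_mapped_walk() API. A sketch of the bare lookup pattern, again assuming the same kernel era as the diff; touch_mapped_pte is a hypothetical helper, while the initialiser, the locked pvmw.pte access and page_vma_mapped_walk_done() are the pieces the diff actually uses:

/*
 * Sketch only: find the PTE that maps @page at @addr in @vma, do some
 * work under the PTE lock, then release it. This is the shape that
 * __replace_page() takes after the conversion above.
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/rmap.h>

static int touch_mapped_pte(struct page *page, struct vm_area_struct *vma,
			    unsigned long addr)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = addr,
	};

	/* On success the walk returns with the PTE mapped and locked. */
	if (!page_vma_mapped_walk(&pvmw))
		return -EAGAIN;

	/* pvmw.pte now plays the role the old ptep played. */
	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));

	/* Unlocks and unmaps; replaces pte_unmap_unlock(ptep, ptl). */
	page_vma_mapped_walk_done(&pvmw);
	return 0;
}

The FOLL_SPLIT flag added to the get_user_pages_remote() call serves the same conversion: it makes GUP split any transparent huge page covering vaddr up front, which is what lets the new VM_BUG_ON_PAGE(PageTransHuge(old_page), ...) assume __replace_page() only ever sees a small page.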
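
Finally, the build_map_info() hunk swaps the open-coded mm_users increment for mmget_not_zero(). As far as I know, the helper in this era (include/linux/sched.h) is a direct wrapper around the same atomic operation, so the change is a pure readability/intent cleanup; believed-equivalent shape:

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	/* Take a reference only if the mm still has live users. */
	return atomic_inc_not_zero(&mm->mm_users);
}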