Diffstat (limited to 'mm/kasan')
 mm/kasan/common.c         | 20
 mm/kasan/generic.c        |  4
 mm/kasan/hw_tags.c        | 60
 mm/kasan/kasan.h          | 33
 mm/kasan/report.c         | 41
 mm/kasan/report_generic.c | 32
 mm/kasan/report_hw_tags.c | 35
 mm/kasan/report_sw_tags.c | 26
 mm/kasan/report_tags.c    |  2
 mm/kasan/sw_tags.c        |  6
 10 files changed, 226 insertions(+), 33 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 21e66d7f261d..b376a5d055e5 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -43,7 +43,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
unsigned int nr_entries;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
- return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
+ return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}
void kasan_set_track(struct kasan_track *track, gfp_t flags)
@@ -95,19 +95,24 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
}
#endif /* CONFIG_KASAN_STACK */
-void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
+bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
u8 tag;
unsigned long i;
if (unlikely(PageHighMem(page)))
- return;
+ return false;
+
+ if (!kasan_sample_page_alloc(order))
+ return false;
tag = kasan_random_tag();
kasan_unpoison(set_tag(page_address(page), tag),
PAGE_SIZE << order, init);
for (i = 0; i < (1 << order); i++)
page_kasan_tag_set(page + i, tag);
+
+ return true;
}
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
@@ -117,11 +122,6 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
KASAN_PAGE_FREE, init);
}
-void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
-{
- cache->kasan_info.is_kmalloc = true;
-}
-
void __kasan_poison_slab(struct slab *slab)
{
struct page *page = slab_page(slab);
@@ -324,7 +324,7 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
kasan_unpoison(tagged_object, cache->object_size, init);
/* Save alloc info (if possible) for non-kmalloc() allocations. */
- if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
+ if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, tagged_object, flags);
return tagged_object;
@@ -370,7 +370,7 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
* Save alloc info (if possible) for kmalloc() allocations.
* This also rewrites the alloc info when called from kasan_krealloc().
*/
- if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
+ if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, (void *)object, flags);
/* Keep the tag that was set by kasan_slab_alloc(). */
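The last two hunks above swap KASAN's private cache->kasan_info.is_kmalloc flag for a query against the slab layer itself. A minimal sketch of what such a helper can look like, assuming the slab layer marks kmalloc caches with a SLAB_KMALLOC cache flag (the authoritative definition lives in mm/slab.h, not in this patch):

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	/* Assumed flag: set at cache creation time for kmalloc caches. */
	return (s->flags & SLAB_KMALLOC);
}

With the flag owned by the slab layer, KASAN no longer needs the __kasan_cache_create_kmalloc() hook removed earlier in this file.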
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index cb762982c8ba..e5eef670735e 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -172,10 +172,8 @@ static __always_inline bool check_region_inline(unsigned long addr,
if (unlikely(addr + size < addr))
return !kasan_report(addr, size, write, ret_ip);
- if (unlikely((void *)addr <
- kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
+ if (unlikely(!addr_has_metadata((void *)addr)))
return !kasan_report(addr, size, write, ret_ip);
- }
if (likely(!memory_is_poisoned(addr, size)))
return true;
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index b22c4f461cb0..d1bcb0205327 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -59,6 +59,24 @@ EXPORT_SYMBOL_GPL(kasan_mode);
/* Whether to enable vmalloc tagging. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
+#define PAGE_ALLOC_SAMPLE_DEFAULT 1
+#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3
+
+/*
+ * Sampling interval of page_alloc allocation (un)poisoning.
+ * Defaults to no sampling.
+ */
+unsigned long kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
+
+/*
+ * Minimum order of page_alloc allocations to be affected by sampling.
+ * The default value is chosen to match both
+ * PAGE_ALLOC_COSTLY_ORDER and SKB_FRAG_PAGE_ORDER.
+ */
+unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
+
+DEFINE_PER_CPU(long, kasan_page_alloc_skip);
+
/* kasan=off/on */
static int __init early_kasan_flag(char *arg)
{
@@ -122,6 +140,48 @@ static inline const char *kasan_mode_info(void)
return "sync";
}
+/* kasan.page_alloc.sample=<sampling interval> */
+static int __init early_kasan_flag_page_alloc_sample(char *arg)
+{
+ int rv;
+
+ if (!arg)
+ return -EINVAL;
+
+ rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
+ if (rv)
+ return rv;
+
+ if (!kasan_page_alloc_sample || kasan_page_alloc_sample > LONG_MAX) {
+ kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);
+
+/* kasan.page_alloc.sample.order=<minimum page order> */
+static int __init early_kasan_flag_page_alloc_sample_order(char *arg)
+{
+ int rv;
+
+ if (!arg)
+ return -EINVAL;
+
+ rv = kstrtouint(arg, 0, &kasan_page_alloc_sample_order);
+ if (rv)
+ return rv;
+
+ if (kasan_page_alloc_sample_order > INT_MAX) {
+ kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+early_param("kasan.page_alloc.sample.order", early_kasan_flag_page_alloc_sample_order);
+
/*
* kasan_init_hw_tags_cpu() is called for each CPU.
* Not marked as __init as a CPU can be hot-plugged after boot.
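Taken together, the two parsers above let sampling be configured from the kernel command line. For example (illustrative values, not the defaults):

	kasan.page_alloc.sample=16 kasan.page_alloc.sample.order=3

would (un)poison only about one in sixteen page_alloc allocations of order 3 and above, while allocations below that order stay checked on every call.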
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 71c15438afcf..9377b0789edc 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -42,6 +42,10 @@ enum kasan_mode {
extern enum kasan_mode kasan_mode __ro_after_init;
+extern unsigned long kasan_page_alloc_sample;
+extern unsigned int kasan_page_alloc_sample_order;
+DECLARE_PER_CPU(long, kasan_page_alloc_skip);
+
static inline bool kasan_vmalloc_enabled(void)
{
return static_branch_likely(&kasan_flag_vmalloc);
@@ -57,6 +61,24 @@ static inline bool kasan_sync_fault_possible(void)
return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
}
+static inline bool kasan_sample_page_alloc(unsigned int order)
+{
+ /* Fast-path for when sampling is disabled. */
+ if (kasan_page_alloc_sample == 1)
+ return true;
+
+ if (order < kasan_page_alloc_sample_order)
+ return true;
+
+ if (this_cpu_dec_return(kasan_page_alloc_skip) < 0) {
+ this_cpu_write(kasan_page_alloc_skip,
+ kasan_page_alloc_sample - 1);
+ return true;
+ }
+
+ return false;
+}
+
#else /* CONFIG_KASAN_HW_TAGS */
static inline bool kasan_async_fault_possible(void)
@@ -69,6 +91,11 @@ static inline bool kasan_sync_fault_possible(void)
return true;
}
+static inline bool kasan_sample_page_alloc(unsigned int order)
+{
+ return true;
+}
+
#endif /* CONFIG_KASAN_HW_TAGS */
#ifdef CONFIG_KASAN_GENERIC
@@ -180,6 +207,7 @@ struct kasan_report_info {
void *first_bad_addr;
struct kmem_cache *cache;
void *object;
+ size_t alloc_size;
/* Filled in by the mode-specific reporting code. */
const char *bug_type;
@@ -269,7 +297,7 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
<< KASAN_SHADOW_SCALE_SHIFT);
}
-static inline bool addr_has_metadata(const void *addr)
+static __always_inline bool addr_has_metadata(const void *addr)
{
return (kasan_reset_tag(addr) >=
kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
@@ -288,7 +316,7 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-static inline bool addr_has_metadata(const void *addr)
+static __always_inline bool addr_has_metadata(const void *addr)
{
return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
}
@@ -296,6 +324,7 @@ static inline bool addr_has_metadata(const void *addr)
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
void *kasan_find_first_bad_addr(void *addr, size_t size);
+size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache);
void kasan_complete_mode_report_info(struct kasan_report_info *info);
void kasan_metadata_fetch_row(char *buffer, void *row);
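kasan_sample_page_alloc() above is a decrement-and-reset counter: each CPU counts down kasan_page_alloc_skip and checks one allocation per kasan_page_alloc_sample interval. A minimal user-space model of the same arithmetic, assuming a single thread so a plain variable stands in for the per-CPU counter (the order threshold is omitted for brevity):

#include <stdio.h>

static long sample = 4;	/* interval: check one in every four allocations */
static long skip;	/* models the per-CPU kasan_page_alloc_skip */

static int sample_page_alloc(void)
{
	/* Fast path, as in the kernel: an interval of 1 means no sampling. */
	if (sample == 1)
		return 1;
	/* Decrement-and-test, mirroring this_cpu_dec_return() < 0. */
	if (--skip < 0) {
		skip = sample - 1;
		return 1;
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("alloc %d: %s\n", i,
		       sample_page_alloc() ? "checked" : "skipped");
	return 0;
}

Starting from a zero counter, the first allocation is always checked and the counter reloads to sample - 1, so the pattern is one checked allocation followed by sample - 1 skipped ones.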
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 22598b20c7b7..89078f912827 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -231,33 +231,46 @@ static inline struct page *addr_to_page(const void *addr)
return NULL;
}
-static void describe_object_addr(const void *addr, struct kmem_cache *cache,
- void *object)
+static void describe_object_addr(const void *addr, struct kasan_report_info *info)
{
unsigned long access_addr = (unsigned long)addr;
- unsigned long object_addr = (unsigned long)object;
- const char *rel_type;
+ unsigned long object_addr = (unsigned long)info->object;
+ const char *rel_type, *region_state = "";
int rel_bytes;
pr_err("The buggy address belongs to the object at %px\n"
" which belongs to the cache %s of size %d\n",
- object, cache->name, cache->object_size);
+ info->object, info->cache->name, info->cache->object_size);
if (access_addr < object_addr) {
rel_type = "to the left";
rel_bytes = object_addr - access_addr;
- } else if (access_addr >= object_addr + cache->object_size) {
+ } else if (access_addr >= object_addr + info->alloc_size) {
rel_type = "to the right";
- rel_bytes = access_addr - (object_addr + cache->object_size);
+ rel_bytes = access_addr - (object_addr + info->alloc_size);
} else {
rel_type = "inside";
rel_bytes = access_addr - object_addr;
}
+ /*
+ * Tag-Based modes use the stack ring to infer the bug type, but the
+ * memory region state description is generated based on the metadata.
+ * Thus, defining the region state as below can contradict the metadata.
+ * Fixing this requires further improvements, so only infer the state
+ * for the Generic mode.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ if (strcmp(info->bug_type, "slab-out-of-bounds") == 0)
+ region_state = "allocated ";
+ else if (strcmp(info->bug_type, "slab-use-after-free") == 0)
+ region_state = "freed ";
+ }
+
pr_err("The buggy address is located %d bytes %s of\n"
- " %d-byte region [%px, %px)\n",
- rel_bytes, rel_type, cache->object_size, (void *)object_addr,
- (void *)(object_addr + cache->object_size));
+ " %s%zu-byte region [%px, %px)\n",
+ rel_bytes, rel_type, region_state, info->alloc_size,
+ (void *)object_addr, (void *)(object_addr + info->alloc_size));
}
static void describe_object_stacks(struct kasan_report_info *info)
@@ -279,7 +292,7 @@ static void describe_object(const void *addr, struct kasan_report_info *info)
{
if (kasan_stack_collection_enabled())
describe_object_stacks(info);
- describe_object_addr(addr, info->cache, info->object);
+ describe_object_addr(addr, info);
}
static inline bool kernel_or_module_addr(const void *addr)
@@ -436,6 +449,12 @@ static void complete_report_info(struct kasan_report_info *info)
if (slab) {
info->cache = slab->slab_cache;
info->object = nearest_obj(info->cache, slab, addr);
+
+ /* Try to determine allocation size based on the metadata. */
+ info->alloc_size = kasan_get_alloc_size(info->object, info->cache);
+ /* Fallback to the object size if failed. */
+ if (!info->alloc_size)
+ info->alloc_size = info->cache->object_size;
} else
info->cache = info->object = NULL;
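Plugged into the updated pr_err() format above, a slab out-of-bounds report now renders along these lines (addresses and sizes here are made up for illustration):

	The buggy address is located 5 bytes to the right of
	 allocated 64-byte region [ffff000012345600, ffff000012345640)

The region_state prefix ("allocated " / "freed ") and the alloc_size-based bounds are the two user-visible changes; previously the line always printed cache->object_size.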
diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
index 043c94b04605..87d39bc0a673 100644
--- a/mm/kasan/report_generic.c
+++ b/mm/kasan/report_generic.c
@@ -43,6 +43,34 @@ void *kasan_find_first_bad_addr(void *addr, size_t size)
return p;
}
+size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache)
+{
+ size_t size = 0;
+ u8 *shadow;
+
+ /*
+ * Skip the addr_has_metadata check, as this function only operates on
+ * slab memory, which must have metadata.
+ */
+
+ /*
+ * The loop below returns 0 for freed objects, for which KASAN cannot
+ * calculate the allocation size based on the metadata.
+ */
+ shadow = (u8 *)kasan_mem_to_shadow(object);
+ while (size < cache->object_size) {
+ if (*shadow == 0)
+ size += KASAN_GRANULE_SIZE;
+ else if (*shadow >= 1 && *shadow <= KASAN_GRANULE_SIZE - 1)
+ return size + *shadow;
+ else
+ return size;
+ shadow++;
+ }
+
+ return cache->object_size;
+}
+
static const char *get_shadow_bug_type(struct kasan_report_info *info)
{
const char *bug_type = "unknown-crash";
@@ -79,9 +107,11 @@ static const char *get_shadow_bug_type(struct kasan_report_info *info)
bug_type = "stack-out-of-bounds";
break;
case KASAN_PAGE_FREE:
+ bug_type = "use-after-free";
+ break;
case KASAN_SLAB_FREE:
case KASAN_SLAB_FREETRACK:
- bug_type = "use-after-free";
+ bug_type = "slab-use-after-free";
break;
case KASAN_ALLOCA_LEFT:
case KASAN_ALLOCA_RIGHT:
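A worked example of the shadow walk in kasan_get_alloc_size() above: for a kmalloc(10) object, generic KASAN (8-byte granules) encodes the first, fully accessible granule as 0 and the partially accessible second granule as 2, followed by redzone markers (0xFC is KASAN_KMALLOC_REDZONE in the generic mode):

	object granules:  [ 8 bytes valid ][ 2 valid, 6 poisoned ][ redzone ... ]
	shadow bytes:     [       0       ][          2          ][  0xFC  ... ]

The loop adds 8 for the zero byte, then hits the 2 (which lies in 1..KASAN_GRANULE_SIZE - 1) and returns 8 + 2 = 10. The tag-based variants added to report_sw_tags.c and report_hw_tags.c below walk the same way, except they count whole granules whose tag is not KASAN_TAG_INVALID.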
diff --git a/mm/kasan/report_hw_tags.c b/mm/kasan/report_hw_tags.c
index f3d3be614e4b..32e80f78de7d 100644
--- a/mm/kasan/report_hw_tags.c
+++ b/mm/kasan/report_hw_tags.c
@@ -17,10 +17,43 @@
void *kasan_find_first_bad_addr(void *addr, size_t size)
{
- /* Return the same value regardless of whether addr_has_metadata(). */
+ /*
+ * Hardware Tag-Based KASAN only calls this function for normal memory
+ * accesses, and thus addr points precisely to the first bad address
+ * with an invalid (and present) memory tag. Therefore:
+ * 1. Return the address as is without walking memory tags.
+ * 2. Skip the addr_has_metadata check.
+ */
return kasan_reset_tag(addr);
}
+size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache)
+{
+ size_t size = 0;
+ int i = 0;
+ u8 memory_tag;
+
+ /*
+ * Skip the addr_has_metadata check, as this function only operates on
+ * slab memory, which must have metadata.
+ */
+
+ /*
+ * The loop below returns 0 for freed objects, for which KASAN cannot
+ * calculate the allocation size based on the metadata.
+ */
+ while (size < cache->object_size) {
+ memory_tag = hw_get_mem_tag(object + i * KASAN_GRANULE_SIZE);
+ if (memory_tag != KASAN_TAG_INVALID)
+ size += KASAN_GRANULE_SIZE;
+ else
+ return size;
+ i++;
+ }
+
+ return cache->object_size;
+}
+
void kasan_metadata_fetch_row(char *buffer, void *row)
{
int i;
diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c
index 7a26397297ed..8b1f5a73ee6d 100644
--- a/mm/kasan/report_sw_tags.c
+++ b/mm/kasan/report_sw_tags.c
@@ -45,6 +45,32 @@ void *kasan_find_first_bad_addr(void *addr, size_t size)
return p;
}
+size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache)
+{
+ size_t size = 0;
+ u8 *shadow;
+
+ /*
+ * Skip the addr_has_metadata check, as this function only operates on
+ * slab memory, which must have metadata.
+ */
+
+ /*
+ * The loop below returns 0 for freed objects, for which KASAN cannot
+ * calculate the allocation size based on the metadata.
+ */
+ shadow = (u8 *)kasan_mem_to_shadow(object);
+ while (size < cache->object_size) {
+ if (*shadow != KASAN_TAG_INVALID)
+ size += KASAN_GRANULE_SIZE;
+ else
+ return size;
+ shadow++;
+ }
+
+ return cache->object_size;
+}
+
void kasan_metadata_fetch_row(char *buffer, void *row)
{
memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
index ecede06ef374..8b8bfdb3cfdb 100644
--- a/mm/kasan/report_tags.c
+++ b/mm/kasan/report_tags.c
@@ -89,7 +89,7 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
* a use-after-free.
*/
if (!info->bug_type)
- info->bug_type = "use-after-free";
+ info->bug_type = "slab-use-after-free";
} else {
/* Second alloc of the same object. Give up. */
if (alloc_found)
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index a3afaf2ad1b1..30da65fa02a1 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -106,10 +106,8 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
return true;
untagged_addr = kasan_reset_tag((const void *)addr);
- if (unlikely(untagged_addr <
- kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
+ if (unlikely(!addr_has_metadata(untagged_addr)))
return !kasan_report(addr, size, write, ret_ip);
- }
shadow_first = kasan_mem_to_shadow(untagged_addr);
shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
@@ -127,7 +125,7 @@ bool kasan_byte_accessible(const void *addr)
void *untagged_addr = kasan_reset_tag(addr);
u8 shadow_byte;
- if (untagged_addr < kasan_shadow_to_mem((void *)KASAN_SHADOW_START))
+ if (!addr_has_metadata(untagged_addr))
return false;
shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(untagged_addr));