When the kernel allocates memory with kmalloc(), slub/slab selects a
suitable kmem_cache whose object size is often greater than the requested
size. The unused trailing space of the object may contain stale (dirty)
data, and that stale data can happen to hold values that look like pointers
to a genuinely leaked block of memory. When kmemleak scans the object, it
treats those stale values as live references, so the leaked block is never
reported. Fix this by updating the kmemleak object's size to the requested
size, so that kmemleak does not scan the unused space.
Signed-off-by: Chen Lin Z <
lin.z...@intel.com>
Signed-off-by: Liu, XinwuX <
xinwu...@intel.com>
---
include/linux/kmemleak.h | 4 ++++
mm/kmemleak.c | 40 +++++++++++++++++++++++++++++++++++++++-
mm/slab.c | 11 ++++++++++-
mm/slub.c | 12 ++++++++++++
4 files changed, 65 insertions(+), 2 deletions(-)
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index e705467..cc35a2f 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -37,6 +37,7 @@ extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
+extern void kmemleak_set_size(const void *ptr, size_t size) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
int min_count, unsigned long flags,
@@ -104,6 +105,9 @@ static inline void kmemleak_erase(void **ptr)
static inline void kmemleak_no_scan(const void *ptr)
{
}
+static inline void kmemleak_set_size(const void *ptr, size_t size)
+{
+}
#endif /* CONFIG_DEBUG_KMEMLEAK */
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f0fe4f2..487086e 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -241,7 +241,8 @@ enum {
KMEMLEAK_NOT_LEAK,
KMEMLEAK_IGNORE,
KMEMLEAK_SCAN_AREA,
- KMEMLEAK_NO_SCAN
+ KMEMLEAK_NO_SCAN,
+ KMEMLEAK_SET_SIZE
};
/*
@@ -799,6 +800,23 @@ static void object_no_scan(unsigned long ptr)
}
/*
+ * Set the size for an allocated object.
+ */
+static void __object_set_size(unsigned long ptr, size_t size)
+{
+ unsigned long flags;
+ struct kmemleak_object *object;
+
+ object = find_and_get_object(ptr, 0);
+ if (!object) {
+ kmemleak_warn("Try to set unknown object at 0x%08lx\n", ptr);
+ return;
+ }
+ object->size = size;
+ put_object(object);
+}
+
+/*
* Log an early kmemleak_* call to the early_log buffer. These calls will be
* processed later once kmemleak is fully initialized.
*/
@@ -1105,6 +1123,23 @@ void __ref kmemleak_no_scan(const void *ptr)
}
EXPORT_SYMBOL(kmemleak_no_scan);
+/**
+ * kmemleak_set_size - set an allocated object's size
+ * @ptr: pointer to beginning of the object
+ * @size: the new size of the allocated object
+ *
+ * This function needs to be called before the allocation function returns.
+ */
+void __ref kmemleak_set_size(const void *ptr, size_t size)
+{
+
+ if (kmemleak_enabled && ptr && !IS_ERR(ptr))
+ __object_set_size((unsigned long)ptr, size);
+ else if (kmemleak_early_log)
+ log_early(KMEMLEAK_SET_SIZE, ptr, size, 0);
+}
+EXPORT_SYMBOL(kmemleak_set_size);
+
/*
* Update an object's checksum and return true if it was modified.
*/
@@ -1880,6 +1915,9 @@ void __init kmemleak_init(void)
case KMEMLEAK_NO_SCAN:
kmemleak_no_scan(log->ptr);
break;
+ case KMEMLEAK_SET_SIZE:
+ kmemleak_set_size(log->ptr, log->size);
+ break;
default:
kmemleak_warn("Unknown early log operation: %d\n",
log->op_type);
diff --git a/mm/slab.c b/mm/slab.c
index 7eb38dd..90bc4fe 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3476,11 +3476,17 @@ static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
struct kmem_cache *cachep;
+ void *ret;
cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+ ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
+
+ if (size < cachep->object_size)
+ kmemleak_set_size(ret, size);
+
+ return ret;
}
void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3517,6 +3523,9 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
trace_kmalloc(caller, ret,
size, cachep->size, flags);
+ if (size < cachep->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
diff --git a/mm/slub.c b/mm/slub.c
index 54c0876..4ef17e5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3321,6 +3321,9 @@ void *__kmalloc(size_t size, gfp_t flags)
kasan_kmalloc(s, ret, size);
+ if (size < s->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc);
@@ -3366,6 +3369,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
kasan_kmalloc(s, ret, size);
+ if (size < s->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
@@ -3823,6 +3829,9 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
/* Honor the call site pointer we received. */
trace_kmalloc(caller, ret, size, s->size, gfpflags);
+ if (size < s->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
@@ -3853,6 +3862,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
/* Honor the call site pointer we received. */
trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
+ if (size < s->object_size)
+ kmemleak_set_size(ret, size);
+
return ret;
}
#endif
--
1.9.1