
[PATCH] kcov: add unique cover, edge, and cmp modes


Joey Jiao

Jan 10, 2025, 2:33:59 AM
to dvy...@google.com, andre...@gmail.com, cor...@lwn.net, ak...@linux-foundation.org, gre...@linuxfoundation.org, nog...@google.com, quic_j...@quicinc.com, el...@google.com, pierre....@arm.com, cmll...@google.com, quic_...@quicinc.com, richard...@gmail.com, tg...@linutronix.de, ar...@arndb.de, catalin...@arm.com, wi...@kernel.org, den...@kernel.org, t...@kernel.org, c...@linux.com, ruanj...@huawei.com, col...@suse.de, andriy.s...@linux.intel.com, ker...@quicinc.com, quic_...@quicinc.com, kasa...@googlegroups.com, work...@vger.kernel.org, linu...@vger.kernel.org, linux-...@vger.kernel.org, linux-ar...@lists.infradead.org, linu...@kvack.org
From: "Jiao, Joey" <quic_j...@quicinc.com>

The current design of KCOV records every trace event into a fixed-size
buffer, which risks frequent buffer overflows. To mitigate this, new
modes are introduced: KCOV_TRACE_UNIQ_PC, KCOV_TRACE_UNIQ_EDGE, and
KCOV_TRACE_UNIQ_CMP. These modes record only unique PCs, edges, and
comparison operands (CMPs).

Key changes include:
- KCOV_TRACE_UNIQ_[PC|EDGE] can be used together to replace KCOV_TRACE_PC.
- KCOV_TRACE_UNIQ_CMP can be used to replace KCOV_TRACE_CMP mode.
- Introduction of hashmaps to store unique coverage data.
- Pre-allocated entries in kcov_map_init during KCOV_INIT_TRACE to avoid
performance issues with kmalloc.
- New structs and functions for managing memory and unique coverage data.
- Example program demonstrating the usage of the new modes.

With the new hashmap and pre-allocated memory pool added, the cover size
can no longer be set to a large value such as 1MB in the KCOV_TRACE_PC or
KCOV_TRACE_CMP modes on a 2GB device with 8 procs; otherwise it causes
frequent OOMs.

For the KCOV_TRACE_UNIQ_[PC|EDGE|CMP] modes, a smaller cover size such as
8KB can be used.
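
For example, enabling the combined unique PC+edge mode from user space
looks as follows (a condensed sketch of the sample program in the
documentation update below; headers and error handling omitted):

    #define KCOV_INIT_TRACE        _IOR('c', 1, unsigned long)
    #define KCOV_ENABLE            _IO('c', 100)
    #define KCOV_TRACE_UNIQ_PC     2
    #define KCOV_TRACE_UNIQ_EDGE   4
    #define COVER_SIZE             (8 << 10)

    int fd = open("/sys/kernel/debug/kcov", O_RDWR);
    ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
    /* First mapping receives the unique PCs. */
    unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    /* Second mapping, at byte offset COVER_SIZE * sizeof(unsigned long),
     * receives the unique edges. */
    unsigned long *edge = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                               PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                               COVER_SIZE * sizeof(unsigned long));
    ioctl(fd, KCOV_ENABLE, KCOV_TRACE_UNIQ_PC | KCOV_TRACE_UNIQ_EDGE);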

Signed-off-by: Jiao, Joey <quic_j...@quicinc.com>
---
Documentation/dev-tools/kcov.rst | 243 +++++++++++-----------
arch/arm64/include/asm/irqflags.h | 8 +-
arch/arm64/include/asm/percpu.h | 2 +-
arch/arm64/include/asm/preempt.h | 2 +-
include/linux/kcov.h | 10 +-
include/linux/list.h | 2 +-
include/uapi/linux/kcov.h | 6 +
kernel/kcov.c | 334 +++++++++++++++++++++++++-----
lib/Makefile | 2 +
9 files changed, 429 insertions(+), 180 deletions(-)

diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index 6611434e2dd2..061ae20b867f 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -40,11 +40,12 @@ Coverage data only becomes accessible once debugfs has been mounted::

mount -t debugfs none /sys/kernel/debug

-Coverage collection
+Coverage collection for different modes
-------------------

The following program demonstrates how to use KCOV to collect coverage for a
-single syscall from within a test program:
+single syscall from within a test program; argv[1] can be provided to select
+which mode to enable:

.. code-block:: c

@@ -60,55 +61,130 @@ single syscall from within a test program:
#include <fcntl.h>
#include <linux/types.h>

- #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
+ #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_ENABLE _IO('c', 100)
- #define KCOV_DISABLE _IO('c', 101)
+ #define KCOV_DISABLE _IO('c', 101)
#define COVER_SIZE (64<<10)

#define KCOV_TRACE_PC 0
#define KCOV_TRACE_CMP 1
+ #define KCOV_TRACE_UNIQ_PC 2
+ #define KCOV_TRACE_UNIQ_EDGE 4
+ #define KCOV_TRACE_UNIQ_CMP 8
+
+ /* Number of 64-bit words per record. */
+ #define KCOV_WORDS_PER_CMP 4
+
+ /*
+ * The format for the types of collected comparisons.
+ *
+ * Bit 0 shows whether one of the arguments is a compile-time constant.
+ * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
+ */
+
+ #define KCOV_CMP_CONST (1 << 0)
+ #define KCOV_CMP_SIZE(n) ((n) << 1)
+ #define KCOV_CMP_MASK KCOV_CMP_SIZE(3)

int main(int argc, char **argv)
{
- int fd;
- unsigned long *cover, n, i;
-
- /* A single fd descriptor allows coverage collection on a single
- * thread.
- */
- fd = open("/sys/kernel/debug/kcov", O_RDWR);
- if (fd == -1)
- perror("open"), exit(1);
- /* Setup trace mode and trace size. */
- if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
- perror("ioctl"), exit(1);
- /* Mmap buffer shared between kernel- and user-space. */
- cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
- PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if ((void*)cover == MAP_FAILED)
- perror("mmap"), exit(1);
- /* Enable coverage collection on the current thread. */
- if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
- perror("ioctl"), exit(1);
- /* Reset coverage from the tail of the ioctl() call. */
- __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
- /* Call the target syscall call. */
- read(-1, NULL, 0);
- /* Read number of PCs collected. */
- n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
- for (i = 0; i < n; i++)
- printf("0x%lx\n", cover[i + 1]);
- /* Disable coverage collection for the current thread. After this call
- * coverage can be enabled for a different thread.
- */
- if (ioctl(fd, KCOV_DISABLE, 0))
- perror("ioctl"), exit(1);
- /* Free resources. */
- if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
- perror("munmap"), exit(1);
- if (close(fd))
- perror("close"), exit(1);
- return 0;
+ int fd;
+ unsigned long *cover, *edge, n, n1, i, type, arg1, arg2, is_const, size;
+ unsigned int mode = KCOV_TRACE_PC;
+
+ /* argv[1] controls which mode to use; defaults to KCOV_TRACE_PC.
+ * Supported modes include:
+ * KCOV_TRACE_PC
+ * KCOV_TRACE_CMP
+ * KCOV_TRACE_UNIQ_PC
+ * KCOV_TRACE_UNIQ_EDGE
+ * KCOV_TRACE_UNIQ_PC | KCOV_TRACE_UNIQ_EDGE
+ * KCOV_TRACE_UNIQ_CMP
+ */
+ if (argc > 1)
+ mode = (unsigned int)strtoul(argv[1], NULL, 10);
+ printf("The mode is: %u\n", mode);
+ if (mode != KCOV_TRACE_PC && mode != KCOV_TRACE_CMP &&
+ !(mode & (KCOV_TRACE_UNIQ_PC | KCOV_TRACE_UNIQ_EDGE | KCOV_TRACE_UNIQ_CMP))) {
+ printf("Unsupported mode!\n");
+ exit(1);
+ }
+ /* A single fd descriptor allows coverage collection on a single
+ * thread.
+ */
+ fd = open("/sys/kernel/debug/kcov", O_RDWR);
+ if (fd == -1)
+ perror("open"), exit(1);
+ /* Setup trace mode and trace size. */
+ if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
+ perror("ioctl"), exit(1);
+ /* Mmap buffer shared between kernel- and user-space. */
+ cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if ((void*)cover == MAP_FAILED)
+ perror("mmap"), exit(1);
+ if (mode & KCOV_TRACE_UNIQ_EDGE) {
+ edge = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, COVER_SIZE * sizeof(unsigned long));
+ if ((void*)edge == MAP_FAILED)
+ perror("mmap"), exit(1);
+ }
+ /* Enable coverage collection on the current thread. */
+ if (ioctl(fd, KCOV_ENABLE, mode))
+ perror("ioctl"), exit(1);
+ /* Reset coverage from the tail of the ioctl() call. */
+ __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
+ if (mode & KCOV_TRACE_UNIQ_EDGE)
+ __atomic_store_n(&edge[0], 0, __ATOMIC_RELAXED);
+ /* Call the target syscall call. */
+ read(-1, NULL, 0);
+ /* Read number of PCs collected. */
+ n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+ if (mode & KCOV_TRACE_UNIQ_EDGE)
+ n1 = __atomic_load_n(&edge[0], __ATOMIC_RELAXED);
+ if (mode & (KCOV_TRACE_CMP | KCOV_TRACE_UNIQ_CMP)) {
+ for (i = 0; i < n; i++) {
+ uint64_t ip;
+
+ type = cover[i * KCOV_WORDS_PER_CMP + 1];
+ /* arg1 and arg2 - operands of the comparison. */
+ arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
+ arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
+ /* ip - caller address. */
+ ip = cover[i * KCOV_WORDS_PER_CMP + 4];
+ /* size of the operands. */
+ size = 1 << ((type & KCOV_CMP_MASK) >> 1);
+ /* is_const - true if either operand is a compile-time constant.*/
+ is_const = type & KCOV_CMP_CONST;
+ printf("ip: 0x%lx type: 0x%lx, arg1: 0x%lx, arg2: 0x%lx, "
+ "size: %lu, %s\n",
+ ip, type, arg1, arg2, size,
+ is_const ? "const" : "non-const");
+ }
+ } else {
+ for (i = 0; i < n; i++)
+ printf("0x%lx\n", cover[i + 1]);
+ if (mode & KCOV_TRACE_UNIQ_EDGE) {
+ printf("======edge======\n");
+ for (i = 0; i < n1; i++)
+ printf("0x%lx\n", edge[i + 1]);
+ }
+ }
+ /* Disable coverage collection for the current thread. After this call
+ * coverage can be enabled for a different thread.
+ */
+ if (ioctl(fd, KCOV_DISABLE, 0))
+ perror("ioctl"), exit(1);
+ /* Free resources. */
+ if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
+ perror("munmap"), exit(1);
+ if (mode & KCOV_TRACE_UNIQ_EDGE) {
+ if (munmap(edge, COVER_SIZE * sizeof(unsigned long)))
+ perror("munmap"), exit(1);
+ }
+ if (close(fd))
+ perror("close"), exit(1);
+ return 0;
}

After piping through ``addr2line`` the output of the program looks as follows::
@@ -137,85 +213,10 @@ mmaps coverage buffer, and then forks child processes in a loop. The child
processes only need to enable coverage (it gets disabled automatically when
a thread exits).

-Comparison operands collection
-------------------------------
-
-Comparison operands collection is similar to coverage collection:
-
-.. code-block:: c
-
- /* Same includes and defines as above. */
-
- /* Number of 64-bit words per record. */
- #define KCOV_WORDS_PER_CMP 4
-
- /*
- * The format for the types of collected comparisons.
- *
- * Bit 0 shows whether one of the arguments is a compile-time constant.
- * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
- */
-
- #define KCOV_CMP_CONST (1 << 0)
- #define KCOV_CMP_SIZE(n) ((n) << 1)
- #define KCOV_CMP_MASK KCOV_CMP_SIZE(3)
-
- int main(int argc, char **argv)
- {
- int fd;
- uint64_t *cover, type, arg1, arg2, is_const, size;
- unsigned long n, i;
-
- fd = open("/sys/kernel/debug/kcov", O_RDWR);
- if (fd == -1)
- perror("open"), exit(1);
- if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
- perror("ioctl"), exit(1);
- /*
- * Note that the buffer pointer is of type uint64_t*, because all
- * the comparison operands are promoted to uint64_t.
- */
- cover = (uint64_t *)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
- PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if ((void*)cover == MAP_FAILED)
- perror("mmap"), exit(1);
- /* Note KCOV_TRACE_CMP instead of KCOV_TRACE_PC. */
- if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP))
- perror("ioctl"), exit(1);
- __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
- read(-1, NULL, 0);
- /* Read number of comparisons collected. */
- n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
- for (i = 0; i < n; i++) {
- uint64_t ip;
-
- type = cover[i * KCOV_WORDS_PER_CMP + 1];
- /* arg1 and arg2 - operands of the comparison. */
- arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
- arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
- /* ip - caller address. */
- ip = cover[i * KCOV_WORDS_PER_CMP + 4];
- /* size of the operands. */
- size = 1 << ((type & KCOV_CMP_MASK) >> 1);
- /* is_const - true if either operand is a compile-time constant.*/
- is_const = type & KCOV_CMP_CONST;
- printf("ip: 0x%lx type: 0x%lx, arg1: 0x%lx, arg2: 0x%lx, "
- "size: %lu, %s\n",
- ip, type, arg1, arg2, size,
- is_const ? "const" : "non-const");
- }
- if (ioctl(fd, KCOV_DISABLE, 0))
- perror("ioctl"), exit(1);
- /* Free resources. */
- if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
- perror("munmap"), exit(1);
- if (close(fd))
- perror("close"), exit(1);
- return 0;
- }
-
Note that the KCOV modes (collection of code coverage or comparison operands)
-are mutually exclusive.
+are mutually exclusive, except that KCOV_TRACE_UNIQ_PC and KCOV_TRACE_UNIQ_EDGE
+can be enabled together.
+

Remote coverage collection
--------------------------
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index d4d7451c2c12..f9a4ccceefcf 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -90,7 +90,7 @@ static __always_inline unsigned long __pmr_local_save_flags(void)
/*
* Save the current interrupt enable state.
*/
-static inline unsigned long arch_local_save_flags(void)
+static __no_sanitize_coverage inline unsigned long arch_local_save_flags(void)
{
if (system_uses_irq_prio_masking()) {
return __pmr_local_save_flags();
@@ -99,17 +99,17 @@ static inline unsigned long arch_local_save_flags(void)
}
}

-static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
+static __no_sanitize_coverage __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
{
return flags & PSR_I_BIT;
}

-static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
+static __no_sanitize_coverage __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
{
return flags != GIC_PRIO_IRQON;
}

-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static __no_sanitize_coverage inline bool arch_irqs_disabled_flags(unsigned long flags)
{
if (system_uses_irq_prio_masking()) {
return __pmr_irqs_disabled_flags(flags);
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 9abcc8ef3087..a40ff8168151 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -29,7 +29,7 @@ static inline unsigned long __hyp_my_cpu_offset(void)
return read_sysreg(tpidr_el2);
}

-static inline unsigned long __kern_my_cpu_offset(void)
+static __no_sanitize_coverage inline unsigned long __kern_my_cpu_offset(void)
{
unsigned long off;

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f..a8742a57481a 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -8,7 +8,7 @@
#define PREEMPT_NEED_RESCHED BIT(32)
#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)

-static inline int preempt_count(void)
+static __no_sanitize_coverage inline int preempt_count(void)
{
return READ_ONCE(current_thread_info()->preempt.count);
}
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 75a2fb8b16c3..8d577716df42 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -20,9 +20,15 @@ enum kcov_mode {
*/
KCOV_MODE_TRACE_PC = 2,
/* Collecting comparison operands mode. */
- KCOV_MODE_TRACE_CMP = 3,
+ KCOV_MODE_TRACE_CMP = 4,
/* The process owns a KCOV remote reference. */
- KCOV_MODE_REMOTE = 4,
+ KCOV_MODE_REMOTE = 8,
+ /* Collecting uniq pc mode. */
+ KCOV_MODE_TRACE_UNIQ_PC = 16,
+ /* Collecting uniq edge mode. */
+ KCOV_MODE_TRACE_UNIQ_EDGE = 32,
+ /* Collecting uniq cmp mode. */
+ KCOV_MODE_TRACE_UNIQ_CMP = 64,
};

#define KCOV_IN_CTXSW (1 << 30)
diff --git a/include/linux/list.h b/include/linux/list.h
index 29a375889fb8..3dc8876ecb5a 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -1018,7 +1018,7 @@ static inline void hlist_del_init(struct hlist_node *n)
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
-static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+static __no_sanitize_coverage inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
WRITE_ONCE(n->next, first);
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index ed95dba9fa37..08abfca273c9 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -35,6 +35,12 @@ enum {
KCOV_TRACE_PC = 0,
/* Collecting comparison operands mode. */
KCOV_TRACE_CMP = 1,
+ /* Collecting uniq PC mode. */
+ KCOV_TRACE_UNIQ_PC = 2,
+ /* Collecting uniq edge mode. */
+ KCOV_TRACE_UNIQ_EDGE = 4,
+ /* Collecting uniq CMP mode. */
+ KCOV_TRACE_UNIQ_CMP = 8,
};

/*
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 28a6be6e64fd..d86901bc684c 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -9,9 +9,11 @@
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/genalloc.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/jhash.h>
#include <linux/kmsan-checks.h>
#include <linux/mm.h>
#include <linux/preempt.h>
@@ -32,6 +34,34 @@
/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

+struct kcov_entry {
+ unsigned long ent;
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+ unsigned long type;
+ unsigned long arg1;
+ unsigned long arg2;
+#endif
+
+ struct hlist_node node;
+};
+
+/* Min gen pool alloc order. */
+#define MIN_POOL_ALLOC_ORDER ilog2(roundup_pow_of_two(sizeof(struct kcov_entry)))
+
+/*
+ * kcov hashmap to store uniq pc|edge|cmp, prealloced mem for kcov_entry
+ * and area shared between kernel and userspace.
+ */
+struct kcov_map {
+ /* 15 bits fit most cases for hash collision, memory and performance. */
+ DECLARE_HASHTABLE(buckets, 15);
+ struct gen_pool *pool;
+ /* Prealloced memory added to pool to be used as kcov_entry. */
+ void *mem;
+ /* Buffer shared with user space. */
+ void *area;
+};
+
/*
* kcov descriptor (one per opened debugfs file).
* State transitions of the descriptor:
@@ -58,8 +88,14 @@ struct kcov {
enum kcov_mode mode;
/* Size of arena (in long's). */
unsigned int size;
+ /* Previous PC. */
+ unsigned long prev_pc;
/* Coverage buffer shared with user space. */
void *area;
+ /* Coverage hashmap for unique pc|cmp. */
+ struct kcov_map *map;
+ /* Edge hashmap for unique edge. */
+ struct kcov_map *map_edge;
/* Task for which we collect coverage, or NULL. */
struct task_struct *t;
/* Collecting coverage from remote (background) threads. */
@@ -171,7 +207,7 @@ static inline bool in_softirq_really(void)
return in_serving_softirq() && !in_hardirq() && !in_nmi();
}

-static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static notrace unsigned int check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
unsigned int mode;

@@ -191,7 +227,125 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
* kcov_start().
*/
barrier();
- return mode == needed_mode;
+ return mode & needed_mode;
+}
+
+static int kcov_map_init(struct kcov *kcov, unsigned long size, bool edge)
+{
+ struct kcov_map *map;
+ void *area;
+ unsigned long flags;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ area = vmalloc_user(size * sizeof(unsigned long));
+ if (!area) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&kcov->lock, flags);
+ map->area = area;
+
+ if (edge) {
+ kcov->map_edge = map;
+ } else {
+ kcov->map = map;
+ kcov->area = area;
+ }
+ spin_unlock_irqrestore(&kcov->lock, flags);
+
+ hash_init(map->buckets);
+
+ map->pool = gen_pool_create(MIN_POOL_ALLOC_ORDER, -1);
+ if (!map->pool)
+ return -ENOMEM;
+
+ map->mem = vmalloc(size * (1 << MIN_POOL_ALLOC_ORDER));
+ if (!map->mem) {
+ vfree(area);
+ gen_pool_destroy(map->pool);
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ if (gen_pool_add(map->pool, (unsigned long)map->mem, size *
+ (1 << MIN_POOL_ALLOC_ORDER), -1)) {
+ vfree(area);
+ vfree(map->mem);
+ gen_pool_destroy(map->pool);
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline u32 hash_key(const struct kcov_entry *k)
+{
+ return jhash((u32 *)k, offsetof(struct kcov_entry, node), 0);
+}
+
+static notrace inline void kcov_map_add(struct kcov_map *map, struct kcov_entry *ent,
+ struct task_struct *t, unsigned int mode)
+{
+ struct kcov *kcov;
+ struct kcov_entry *entry;
+ unsigned int key = hash_key(ent);
+ unsigned long pos, start_index, end_pos, max_pos, *area;
+
+ kcov = t->kcov;
+
+ if ((mode == KCOV_MODE_TRACE_UNIQ_PC ||
+ mode == KCOV_MODE_TRACE_UNIQ_EDGE))
+ hash_for_each_possible_rcu(map->buckets, entry, node, key) {
+ if (entry->ent == ent->ent)
+ return;
+ }
+ else
+ hash_for_each_possible_rcu(map->buckets, entry, node, key) {
+ if (entry->ent == ent->ent && entry->type == ent->type &&
+ entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
+ return;
+ }
+ }
+
+ entry = (struct kcov_entry *)gen_pool_alloc(map->pool, 1 << MIN_POOL_ALLOC_ORDER);
+ if (unlikely(!entry))
+ return;
+
+ barrier();
+ memcpy(entry, ent, sizeof(*entry));
+ hash_add_rcu(map->buckets, &entry->node, key);
+
+ if (mode == KCOV_MODE_TRACE_UNIQ_PC || mode == KCOV_MODE_TRACE_UNIQ_CMP)
+ area = t->kcov_area;
+ else
+ area = kcov->map_edge->area;
+
+ pos = READ_ONCE(area[0]) + 1;
+ if (mode == KCOV_MODE_TRACE_UNIQ_PC || mode == KCOV_MODE_TRACE_UNIQ_EDGE) {
+ if (likely(pos < t->kcov_size)) {
+ WRITE_ONCE(area[0], pos);
+ barrier();
+ area[pos] = ent->ent;
+ }
+ } else {
+ start_index = 1 + (pos - 1) * KCOV_WORDS_PER_CMP;
+ max_pos = t->kcov_size * sizeof(unsigned long);
+ end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
+ if (likely(end_pos <= max_pos)) {
+ /* See comment in __sanitizer_cov_trace_pc(). */
+ WRITE_ONCE(area[0], pos);
+ barrier();
+ area[start_index] = ent->type;
+ area[start_index + 1] = ent->arg1;
+ area[start_index + 2] = ent->arg2;
+ area[start_index + 3] = ent->ent;
+ }
+ }
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
@@ -212,26 +366,45 @@ void notrace __sanitizer_cov_trace_pc(void)
unsigned long *area;
unsigned long ip = canonicalize_ip(_RET_IP_);
unsigned long pos;
+ struct kcov_entry entry = {0};
+ /* Only hash the lower 12 bits so the hash is independent of any module offsets. */
+ unsigned long mask = (1 << 12) - 1;
+ unsigned int mode;

t = current;
- if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
+ if (!check_kcov_mode(KCOV_MODE_TRACE_PC | KCOV_MODE_TRACE_UNIQ_PC |
+ KCOV_MODE_TRACE_UNIQ_EDGE, t))
return;

- area = t->kcov_area;
- /* The first 64-bit word is the number of subsequent PCs. */
- pos = READ_ONCE(area[0]) + 1;
- if (likely(pos < t->kcov_size)) {
- /* Previously we write pc before updating pos. However, some
- * early interrupt code could bypass check_kcov_mode() check
- * and invoke __sanitizer_cov_trace_pc(). If such interrupt is
- * raised between writing pc and updating pos, the pc could be
- * overitten by the recursive __sanitizer_cov_trace_pc().
- * Update pos before writing pc to avoid such interleaving.
- */
- WRITE_ONCE(area[0], pos);
- barrier();
- area[pos] = ip;
+ mode = t->kcov_mode;
+ if (mode == KCOV_MODE_TRACE_PC) {
+ area = t->kcov_area;
+ /* The first 64-bit word is the number of subsequent PCs. */
+ pos = READ_ONCE(area[0]) + 1;
+ if (likely(pos < t->kcov_size)) {
+ /* Previously we write pc before updating pos. However, some
+ * early interrupt code could bypass check_kcov_mode() check
+ * and invoke __sanitizer_cov_trace_pc(). If such interrupt is
+ * raised between writing pc and updating pos, the pc could be
+ * overwritten by the recursive __sanitizer_cov_trace_pc().
+ * Update pos before writing pc to avoid such interleaving.
+ */
+ WRITE_ONCE(area[0], pos);
+ barrier();
+ area[pos] = ip;
+ }
+ } else {
+ if (mode & KCOV_MODE_TRACE_UNIQ_PC) {
+ entry.ent = ip;
+ kcov_map_add(t->kcov->map, &entry, t, KCOV_MODE_TRACE_UNIQ_PC);
+ }
+ if (mode & KCOV_MODE_TRACE_UNIQ_EDGE) {
+ entry.ent = (hash_long(t->kcov->prev_pc & mask, BITS_PER_LONG) & mask) ^ ip;
+ t->kcov->prev_pc = ip;
+ kcov_map_add(t->kcov->map_edge, &entry, t, KCOV_MODE_TRACE_UNIQ_EDGE);
+ }
}
+
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

@@ -241,33 +414,44 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
struct task_struct *t;
u64 *area;
u64 count, start_index, end_pos, max_pos;
+ struct kcov_entry entry = {0};
+ unsigned int mode;

t = current;
- if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
+ if (!check_kcov_mode(KCOV_MODE_TRACE_CMP | KCOV_MODE_TRACE_UNIQ_CMP, t))
return;

+ mode = t->kcov_mode;
ip = canonicalize_ip(ip);

- /*
- * We write all comparison arguments and types as u64.
- * The buffer was allocated for t->kcov_size unsigned longs.
- */
- area = (u64 *)t->kcov_area;
- max_pos = t->kcov_size * sizeof(unsigned long);
-
- count = READ_ONCE(area[0]);
-
- /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
- start_index = 1 + count * KCOV_WORDS_PER_CMP;
- end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
- if (likely(end_pos <= max_pos)) {
- /* See comment in __sanitizer_cov_trace_pc(). */
- WRITE_ONCE(area[0], count + 1);
- barrier();
- area[start_index] = type;
- area[start_index + 1] = arg1;
- area[start_index + 2] = arg2;
- area[start_index + 3] = ip;
+ if (mode == KCOV_MODE_TRACE_CMP) {
+ /*
+ * We write all comparison arguments and types as u64.
+ * The buffer was allocated for t->kcov_size unsigned longs.
+ */
+ area = (u64 *)t->kcov_area;
+ max_pos = t->kcov_size * sizeof(unsigned long);
+
+ count = READ_ONCE(area[0]);
+
+ /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
+ start_index = 1 + count * KCOV_WORDS_PER_CMP;
+ end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
+ if (likely(end_pos <= max_pos)) {
+ /* See comment in __sanitizer_cov_trace_pc(). */
+ WRITE_ONCE(area[0], count + 1);
+ barrier();
+ area[start_index] = type;
+ area[start_index + 1] = arg1;
+ area[start_index + 2] = arg2;
+ area[start_index + 3] = ip;
+ }
+ } else {
+ entry.type = type;
+ entry.arg1 = arg1;
+ entry.arg2 = arg2;
+ entry.ent = ip;
+ kcov_map_add(t->kcov->map, &entry, t, KCOV_MODE_TRACE_UNIQ_CMP);
}
}

@@ -432,11 +616,37 @@ static void kcov_get(struct kcov *kcov)
refcount_inc(&kcov->refcount);
}

+static void kcov_map_free(struct kcov *kcov, bool edge)
+{
+ int bkt;
+ struct hlist_node *tmp;
+ struct kcov_entry *entry;
+ struct kcov_map *map;
+
+ if (edge)
+ map = kcov->map_edge;
+ else
+ map = kcov->map;
+ if (!map)
+ return;
+ rcu_read_lock();
+ hash_for_each_safe(map->buckets, bkt, tmp, entry, node) {
+ hash_del_rcu(&entry->node);
+ gen_pool_free(map->pool, (unsigned long)entry, 1 << MIN_POOL_ALLOC_ORDER);
+ }
+ rcu_read_unlock();
+ vfree(map->area);
+ vfree(map->mem);
+ gen_pool_destroy(map->pool);
+ kfree(map);
+}
+
static void kcov_put(struct kcov *kcov)
{
if (refcount_dec_and_test(&kcov->refcount)) {
kcov_remote_reset(kcov);
- vfree(kcov->area);
+ kcov_map_free(kcov, false);
+ kcov_map_free(kcov, true);
kfree(kcov);
}
}
@@ -491,18 +701,27 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
unsigned long size, off;
struct page *page;
unsigned long flags;
+ void *area;

spin_lock_irqsave(&kcov->lock, flags);
size = kcov->size * sizeof(unsigned long);
- if (kcov->area == NULL || vma->vm_pgoff != 0 ||
- vma->vm_end - vma->vm_start != size) {
+ if (!vma->vm_pgoff) {
+ area = kcov->area;
+ } else if (vma->vm_pgoff == size >> PAGE_SHIFT) {
+ area = kcov->map_edge->area;
+ } else {
+ spin_unlock_irqrestore(&kcov->lock, flags);
+ return -EINVAL;
+ }
+
+ if (!area || vma->vm_end - vma->vm_start != size) {
res = -EINVAL;
goto exit;
}
spin_unlock_irqrestore(&kcov->lock, flags);
vm_flags_set(vma, VM_DONTEXPAND);
for (off = 0; off < size; off += PAGE_SIZE) {
- page = vmalloc_to_page(kcov->area + off);
+ page = vmalloc_to_page(area + off);
res = vm_insert_page(vma, vma->vm_start + off, page);
if (res) {
pr_warn_once("kcov: vm_insert_page() failed\n");
@@ -538,6 +757,8 @@ static int kcov_close(struct inode *inode, struct file *filep)

static int kcov_get_mode(unsigned long arg)
{
+ int mode = 0;
+
if (arg == KCOV_TRACE_PC)
return KCOV_MODE_TRACE_PC;
else if (arg == KCOV_TRACE_CMP)
@@ -546,8 +767,20 @@ static int kcov_get_mode(unsigned long arg)
#else
return -ENOTSUPP;
#endif
- else
+ if (arg & KCOV_TRACE_UNIQ_PC)
+ mode |= KCOV_MODE_TRACE_UNIQ_PC;
+ if (arg & KCOV_TRACE_UNIQ_EDGE)
+ mode |= KCOV_MODE_TRACE_UNIQ_EDGE;
+ if (arg == KCOV_TRACE_UNIQ_CMP)
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+ mode = KCOV_MODE_TRACE_UNIQ_CMP;
+#else
+ return -EOPNOTSUPP;
+#endif
+ if (!mode)
return -EINVAL;
+
+ return mode;
}

/*
@@ -600,7 +833,8 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
* at task exit or voluntary by KCOV_DISABLE. After that it can
* be enabled for another task.
*/
- if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
+ if (kcov->mode != KCOV_MODE_INIT || !kcov->area ||
+ !kcov->map_edge->area)
return -EINVAL;
t = current;
if (kcov->t != NULL || t->kcov != NULL)
@@ -698,7 +932,6 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
unsigned int remote_num_handles;
unsigned long remote_arg_size;
unsigned long size, flags;
- void *area;

kcov = filep->private_data;
switch (cmd) {
@@ -713,16 +946,17 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
size = arg;
if (size < 2 || size > INT_MAX / sizeof(unsigned long))
return -EINVAL;
- area = vmalloc_user(size * sizeof(unsigned long));
- if (area == NULL)
- return -ENOMEM;
+ res = kcov_map_init(kcov, size, false);
+ if (res)
+ return res;
+ res = kcov_map_init(kcov, size, true);
+ if (res)
+ return res;
spin_lock_irqsave(&kcov->lock, flags);
if (kcov->mode != KCOV_MODE_DISABLED) {
spin_unlock_irqrestore(&kcov->lock, flags);
- vfree(area);
return -EBUSY;
}
- kcov->area = area;
kcov->size = size;
kcov->mode = KCOV_MODE_INIT;
spin_unlock_irqrestore(&kcov->lock, flags);
diff --git a/lib/Makefile b/lib/Makefile
index a8155c972f02..7a110a9a4a52 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -15,6 +15,8 @@ KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
KCOV_INSTRUMENT_fault-inject.o := n
KCOV_INSTRUMENT_find_bit.o := n
+KCOV_INSTRUMENT_genalloc.o := n
+KCOV_INSTRUMENT_bitmap.o := n

# string.o implements standard library functions like memset/memcpy etc.
# Use -ffreestanding to ensure that the compiler does not try to "optimize"

base-commit: 9b2ffa6148b1e4468d08f7e0e7e371c43cac9ffe
--
2.47.1

Marco Elver

Jan 10, 2025, 4:23:22 AM
to Joey Jiao, dvy...@google.com, andre...@gmail.com, cor...@lwn.net, ak...@linux-foundation.org, gre...@linuxfoundation.org, nog...@google.com, pierre....@arm.com, cmll...@google.com, quic_...@quicinc.com, richard...@gmail.com, tg...@linutronix.de, ar...@arndb.de, catalin...@arm.com, wi...@kernel.org, den...@kernel.org, t...@kernel.org, c...@linux.com, ruanj...@huawei.com, col...@suse.de, andriy.s...@linux.intel.com, ker...@quicinc.com, quic_...@quicinc.com, kasa...@googlegroups.com, work...@vger.kernel.org, linu...@vger.kernel.org, linux-...@vger.kernel.org, linux-ar...@lists.infradead.org, linu...@kvack.org
On Fri, 10 Jan 2025 at 08:33, Joey Jiao <quic_j...@quicinc.com> wrote:
>
> From: "Jiao, Joey" <quic_j...@quicinc.com>
>
> The current design of KCOV records every trace event into a fixed-size
> buffer, which risks frequent buffer overflows. To mitigate this, new
> modes are introduced: KCOV_TRACE_UNIQ_PC, KCOV_TRACE_UNIQ_EDGE, and
> KCOV_TRACE_UNIQ_CMP. These modes record only unique PCs, edges, and
> comparison operands (CMPs).

There ought to be a cover letter explaining the motivation for this,
and explaining why the new modes would help. Ultimately, what are you
using KCOV for where you encountered this problem?

> Key changes include:
> - KCOV_TRACE_UNIQ_[PC|EDGE] can be used together to replace KCOV_TRACE_PC.
> - KCOV_TRACE_UNIQ_CMP can be used to replace KCOV_TRACE_CMP mode.
> - Introduction of hashmaps to store unique coverage data.
> - Pre-allocated entries in kcov_map_init during KCOV_INIT_TRACE to avoid
> performance issues with kmalloc.
> - New structs and functions for managing memory and unique coverage data.
> - Example program demonstrating the usage of the new modes.

This should be a patch series, carefully splitting each change into a
separate patch.
https://docs.kernel.org/process/submitting-patches.html#split-changes

> With the new hashmap and pre-allocated memory pool added, the cover size
> can no longer be set to a large value such as 1MB in the KCOV_TRACE_PC or
> KCOV_TRACE_CMP modes on a 2GB device with 8 procs; otherwise it causes
> frequent OOMs.
>
> For the KCOV_TRACE_UNIQ_[PC|EDGE|CMP] modes, a smaller cover size such as
> 8KB can be used.
>
> Signed-off-by: Jiao, Joey <quic_j...@quicinc.com>

As-is it's hard to review, and the motivation is unclear. A lot of
code was moved and changed, and reviewers need to understand why that
was done besides your brief explanation above.

Generally, KCOV has very tricky constraints, due to being callable
from any context, including NMI. This means new dependencies need to be
carefully reviewed. For one, we can see this in genalloc's header:

> * The lockless operation only works if there is enough memory
> * available. If new memory is added to the pool a lock has to be
> * still taken. So any user relying on locklessness has to ensure
> * that sufficient memory is preallocated.
> *
> * The basic atomic operation of this allocator is cmpxchg on long.
> * On architectures that don't have NMI-safe cmpxchg implementation,
> * the allocator can NOT be used in NMI handler. So code uses the
> * allocator in NMI handler should depend on
> * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.

And you are calling gen_pool_alloc() from __sanitizer_cov_trace_pc(),
which means this implementation is likely broken on
!CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG architectures (do we have
architectures like that which support KCOV?).

There are probably other sharp corners due to the contexts KCOV can
run in, but I would simply ask you to carefully reason about why each
new dependency is safe.
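
For instance, one conservative option (a sketch only; this is not in
the patch) would be to reject the hashmap-backed modes in
kcov_get_mode() where cmpxchg is not NMI-safe:

    /* gen_pool_alloc() relies on an NMI-safe cmpxchg; without it,
     * bail out of the unique modes early. */
    #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        if (arg & (KCOV_TRACE_UNIQ_PC | KCOV_TRACE_UNIQ_EDGE |
                   KCOV_TRACE_UNIQ_CMP))
            return -EOPNOTSUPP;
    #endif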

Dmitry Vyukov

Jan 10, 2025, 7:16:41 AM
to Marco Elver, Joey Jiao, andre...@gmail.com, cor...@lwn.net, ak...@linux-foundation.org, gre...@linuxfoundation.org, nog...@google.com, pierre....@arm.com, cmll...@google.com, quic_...@quicinc.com, richard...@gmail.com, tg...@linutronix.de, ar...@arndb.de, catalin...@arm.com, wi...@kernel.org, den...@kernel.org, t...@kernel.org, c...@linux.com, ruanj...@huawei.com, col...@suse.de, andriy.s...@linux.intel.com, ker...@quicinc.com, quic_...@quicinc.com, kasa...@googlegroups.com, work...@vger.kernel.org, linu...@vger.kernel.org, linux-...@vger.kernel.org, linux-ar...@lists.infradead.org, linu...@kvack.org
I am also concerned about the performance effect. Does it add a stack
frame to __sanitizer_cov_trace_pc()? Please show the disassembly of the
function before/after.

Also, I have concerns about interrupts and reentrancy. We are still
getting some reentrant calls from interrupts (not all of them are
filtered by the in_task() check). I am afraid these complex hashmaps
will get corrupted.
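
To make the risk concrete: if an interrupt fires between the bucket
lookup and hash_add_rcu() in kcov_map_add() and itself calls back into
kcov_map_add(), two unsynchronized writers touch the same hlist. A
minimal guard sketch (kcov_map_busy is a hypothetical per-task flag,
not something in the patch) that drops nested updates instead of
racing:

    static notrace void kcov_map_add(struct kcov_map *map,
                                     struct kcov_entry *ent,
                                     struct task_struct *t,
                                     unsigned int mode)
    {
        /* Drop reentrant calls that slip past the in_task() check
         * instead of racing on the hash buckets. */
        if (READ_ONCE(t->kcov_map_busy))
            return;
        WRITE_ONCE(t->kcov_map_busy, 1);
        barrier();
        /* ... existing lookup/alloc/insert logic ... */
        barrier();
        WRITE_ONCE(t->kcov_map_busy, 0);
    }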

kernel test robot

Jan 11, 2025, 4:03:33 AM
to Joey Jiao, dvy...@google.com, andre...@gmail.com, cor...@lwn.net, ak...@linux-foundation.org, gre...@linuxfoundation.org, nog...@google.com, el...@google.com, pierre....@arm.com, cmll...@google.com, quic_...@quicinc.com, richard...@gmail.com, tg...@linutronix.de, ar...@arndb.de, catalin...@arm.com, wi...@kernel.org, den...@kernel.org, t...@kernel.org, c...@linux.com, ruanj...@huawei.com, col...@suse.de, andriy.s...@linux.intel.com, oe-kbu...@lists.linux.dev, ker...@quicinc.com, quic_...@quicinc.com, kasa...@googlegroups.com, work...@vger.kernel.org, linu...@vger.kernel.org, linux-...@vger.kernel.org, linux-ar...@lists.infradead.org, linu...@kvack.org
Hi Joey,

kernel test robot noticed the following build errors:

[auto build test ERROR on 9b2ffa6148b1e4468d08f7e0e7e371c43cac9ffe]

url: https://github.com/intel-lab-lkp/linux/commits/Joey-Jiao/kcov-add-unique-cover-edge-and-cmp-modes/20250110-153559
base: 9b2ffa6148b1e4468d08f7e0e7e371c43cac9ffe
patch link: https://lore.kernel.org/r/20250110073056.2594638-1-quic_jiangenj%40quicinc.com
patch subject: [PATCH] kcov: add unique cover, edge, and cmp modes
config: arm-randconfig-002-20250111 (https://download.01.org/0day-ci/archive/20250111/202501111600...@intel.com/config)
compiler: arm-linux-gnueabi-gcc (GCC) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250111/202501111600...@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <l...@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501111600...@intel.com/

All errors (new ones prefixed by >>):

kernel/kcov.c: In function 'kcov_map_add':
>> kernel/kcov.c:309:60: error: 'struct kcov_entry' has no member named 'type'
309 | if (entry->ent == ent->ent && entry->type == ent->type &&
| ^~
kernel/kcov.c:309:73: error: 'struct kcov_entry' has no member named 'type'
309 | if (entry->ent == ent->ent && entry->type == ent->type &&
| ^~
>> kernel/kcov.c:310:34: error: 'struct kcov_entry' has no member named 'arg1'
310 | entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
| ^~
kernel/kcov.c:310:47: error: 'struct kcov_entry' has no member named 'arg1'
310 | entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
| ^~
>> kernel/kcov.c:310:62: error: 'struct kcov_entry' has no member named 'arg2'
310 | entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
| ^~
kernel/kcov.c:310:75: error: 'struct kcov_entry' has no member named 'arg2'
310 | entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
| ^~
kernel/kcov.c:343:48: error: 'struct kcov_entry' has no member named 'type'
343 | area[start_index] = ent->type;
| ^~
kernel/kcov.c:344:52: error: 'struct kcov_entry' has no member named 'arg1'
344 | area[start_index + 1] = ent->arg1;
| ^~
kernel/kcov.c:345:52: error: 'struct kcov_entry' has no member named 'arg2'
345 | area[start_index + 2] = ent->arg2;
| ^~


vim +309 kernel/kcov.c

290
291 static notrace inline void kcov_map_add(struct kcov_map *map, struct kcov_entry *ent,
292 struct task_struct *t, unsigned int mode)
293 {
294 struct kcov *kcov;
295 struct kcov_entry *entry;
296 unsigned int key = hash_key(ent);
297 unsigned long pos, start_index, end_pos, max_pos, *area;
298
299 kcov = t->kcov;
300
301 if ((mode == KCOV_MODE_TRACE_UNIQ_PC ||
302 mode == KCOV_MODE_TRACE_UNIQ_EDGE))
303 hash_for_each_possible_rcu(map->buckets, entry, node, key) {
304 if (entry->ent == ent->ent)
305 return;
306 }
307 else
308 hash_for_each_possible_rcu(map->buckets, entry, node, key) {
> 309 if (entry->ent == ent->ent && entry->type == ent->type &&
> 310 entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
311 return;
312 }
313 }
314
315 entry = (struct kcov_entry *)gen_pool_alloc(map->pool, 1 << MIN_POOL_ALLOC_ORDER);
316 if (unlikely(!entry))
317 return;
318
319 barrier();
320 memcpy(entry, ent, sizeof(*entry));
321 hash_add_rcu(map->buckets, &entry->node, key);
322
323 if (mode == KCOV_MODE_TRACE_UNIQ_PC || mode == KCOV_MODE_TRACE_UNIQ_CMP)
324 area = t->kcov_area;
325 else
326 area = kcov->map_edge->area;
327
328 pos = READ_ONCE(area[0]) + 1;
329 if (mode == KCOV_MODE_TRACE_UNIQ_PC || mode == KCOV_MODE_TRACE_UNIQ_EDGE) {
330 if (likely(pos < t->kcov_size)) {
331 WRITE_ONCE(area[0], pos);
332 barrier();
333 area[pos] = ent->ent;
334 }
335 } else {
336 start_index = 1 + (pos - 1) * KCOV_WORDS_PER_CMP;
337 max_pos = t->kcov_size * sizeof(unsigned long);
338 end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
339 if (likely(end_pos <= max_pos)) {
340 /* See comment in __sanitizer_cov_trace_pc(). */
341 WRITE_ONCE(area[0], pos);
342 barrier();
343 area[start_index] = ent->type;
344 area[start_index + 1] = ent->arg1;
345 area[start_index + 2] = ent->arg2;
346 area[start_index + 3] = ent->ent;
347 }
348 }
349 }
350

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

kernel test robot

Jan 11, 2025, 9:58:22 PM
to Joey Jiao, dvy...@google.com, andre...@gmail.com, cor...@lwn.net, ak...@linux-foundation.org, gre...@linuxfoundation.org, nog...@google.com, el...@google.com, pierre....@arm.com, cmll...@google.com, quic_...@quicinc.com, richard...@gmail.com, tg...@linutronix.de, ar...@arndb.de, catalin...@arm.com, wi...@kernel.org, den...@kernel.org, t...@kernel.org, c...@linux.com, ruanj...@huawei.com, col...@suse.de, andriy.s...@linux.intel.com, ll...@lists.linux.dev, oe-kbu...@lists.linux.dev, ker...@quicinc.com, quic_...@quicinc.com, kasa...@googlegroups.com, work...@vger.kernel.org, linu...@vger.kernel.org, linux-...@vger.kernel.org, linux-ar...@lists.infradead.org, linu...@kvack.org
Hi Joey,

kernel test robot noticed the following build errors:

[auto build test ERROR on 9b2ffa6148b1e4468d08f7e0e7e371c43cac9ffe]

url: https://github.com/intel-lab-lkp/linux/commits/Joey-Jiao/kcov-add-unique-cover-edge-and-cmp-modes/20250110-153559
base: 9b2ffa6148b1e4468d08f7e0e7e371c43cac9ffe
patch link: https://lore.kernel.org/r/20250110073056.2594638-1-quic_jiangenj%40quicinc.com
patch subject: [PATCH] kcov: add unique cover, edge, and cmp modes
config: um-randconfig-002-20250112 (https://download.01.org/0day-ci/archive/20250112/202501121036...@intel.com/config)
compiler: clang version 20.0.0git (https://github.com/llvm/llvm-project f5cd181ffbb7cb61d582fe130d46580d5969d47a)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250112/202501121036...@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <l...@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501121036...@intel.com/

All errors (new ones prefixed by >>):

>> kernel/kcov.c:309:41: error: no member named 'type' in 'struct kcov_entry'
309 | if (entry->ent == ent->ent && entry->type == ent->type &&
| ~~~~~ ^
kernel/kcov.c:309:54: error: no member named 'type' in 'struct kcov_entry'
309 | if (entry->ent == ent->ent && entry->type == ent->type &&
| ~~~ ^
>> kernel/kcov.c:310:15: error: no member named 'arg1' in 'struct kcov_entry'
310 | entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
| ~~~~~ ^
kernel/kcov.c:310:28: error: no member named 'arg1' in 'struct kcov_entry'
310 | entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
| ~~~ ^
>> kernel/kcov.c:310:43: error: no member named 'arg2' in 'struct kcov_entry'
310 | entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
| ~~~~~ ^
kernel/kcov.c:310:56: error: no member named 'arg2' in 'struct kcov_entry'
310 | entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
| ~~~ ^
kernel/kcov.c:343:29: error: no member named 'type' in 'struct kcov_entry'
343 | area[start_index] = ent->type;
| ~~~ ^
kernel/kcov.c:344:33: error: no member named 'arg1' in 'struct kcov_entry'
344 | area[start_index + 1] = ent->arg1;
| ~~~ ^
kernel/kcov.c:345:33: error: no member named 'arg2' in 'struct kcov_entry'
345 | area[start_index + 2] = ent->arg2;
| ~~~ ^
9 errors generated.

Joey Jiao

Jan 14, 2025, 12:44:32 AM
to Marco Elver, dvy...@google.com, andre...@gmail.com, cor...@lwn.net, ak...@linux-foundation.org, gre...@linuxfoundation.org, nog...@google.com, pierre....@arm.com, cmll...@google.com, quic_...@quicinc.com, richard...@gmail.com, tg...@linutronix.de, ar...@arndb.de, catalin...@arm.com, wi...@kernel.org, den...@kernel.org, t...@kernel.org, c...@linux.com, ruanj...@huawei.com, col...@suse.de, andriy.s...@linux.intel.com, ker...@quicinc.com, quic_...@quicinc.com, kasa...@googlegroups.com, work...@vger.kernel.org, linu...@vger.kernel.org, linux-...@vger.kernel.org, linux-ar...@lists.infradead.org, linu...@kvack.org
On Fri, Jan 10, 2025 at 10:22:44AM +0100, Marco Elver wrote:
> On Fri, 10 Jan 2025 at 08:33, Joey Jiao <quic_j...@quicinc.com> wrote:
> >
> > From: "Jiao, Joey" <quic_j...@quicinc.com>
> >
> > The current design of KCOV records every trace event into a fixed-size
> > buffer, which risks frequent buffer overflows. To mitigate this, new
> > modes are introduced: KCOV_TRACE_UNIQ_PC, KCOV_TRACE_UNIQ_EDGE, and
> > KCOV_TRACE_UNIQ_CMP. These modes record only unique PCs, edges, and
> > comparison operands (CMPs).
>
> There ought to be a cover letter explaining the motivation for this,
> and explaining why the new modes would help. Ultimately, what are you
> using KCOV for where you encountered this problem?
>
> > Key changes include:
> > - KCOV_TRACE_UNIQ_[PC|EDGE] can be used together to replace KCOV_TRACE_PC.
> > - KCOV_TRACE_UNIQ_CMP can be used to replace KCOV_TRACE_CMP mode.
> > - Introduction of hashmaps to store unique coverage data.
> > - Pre-allocated entries in kcov_map_init during KCOV_INIT_TRACE to avoid
> > performance issues with kmalloc.
> > - New structs and functions for managing memory and unique coverage data.
> > - Example program demonstrating the usage of the new modes.
>
> This should be a patch series, carefully splitting each change into a
> separate patch.
> https://docs.kernel.org/process/submitting-patches.html#split-changes
Done in `20250114-kcov-v...@quicinc.com`
Need to investigate CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG more.

Joey Jiao

Jan 14, 2025, 12:57:21 AM
to Dmitry Vyukov, Marco Elver, andre...@gmail.com, cor...@lwn.net, ak...@linux-foundation.org, gre...@linuxfoundation.org, nog...@google.com, pierre....@arm.com, cmll...@google.com, quic_...@quicinc.com, richard...@gmail.com, tg...@linutronix.de, ar...@arndb.de, catalin...@arm.com, wi...@kernel.org, den...@kernel.org, t...@kernel.org, c...@linux.com, ruanj...@huawei.com, col...@suse.de, andriy.s...@linux.intel.com, ker...@quicinc.com, quic_...@quicinc.com, kasa...@googlegroups.com, work...@vger.kernel.org, linu...@vger.kernel.org, linux-...@vger.kernel.org, linux-ar...@lists.infradead.org, linu...@kvack.org
# Before the patch
ffffffff8195df30 __sanitizer_cov_trace_pc:
ffffffff8195df30: f3 0f 1e fa endbr64
ffffffff8195df34: 48 8b 04 24 movq (%rsp), %rax
ffffffff8195df38: 65 48 8b 0c 25 00 d6 03 00 movq %gs:251392, %rcx
ffffffff8195df41: 65 8b 15 c0 f6 6d 7e movl %gs:2121135808(%rip), %edx
ffffffff8195df48: 81 e2 00 01 ff 00 andl $16711936, %edx
ffffffff8195df4e: 74 11 je 17
<__sanitizer_cov_trace_pc+0x31>
ffffffff8195df50: 81 fa 00 01 00 00 cmpl $256, %edx
ffffffff8195df56: 75 35 jne 53
<__sanitizer_cov_trace_pc+0x5d>
ffffffff8195df58: 83 b9 1c 16 00 00 00 cmpl $0, 5660(%rcx)
ffffffff8195df5f: 74 2c je 44
<__sanitizer_cov_trace_pc+0x5d>
ffffffff8195df61: 8b 91 f8 15 00 00 movl 5624(%rcx), %edx
ffffffff8195df67: 83 fa 02 cmpl $2, %edx
ffffffff8195df6a: 75 21 jne 33
<__sanitizer_cov_trace_pc+0x5d>
ffffffff8195df6c: 48 8b 91 00 16 00 00 movq 5632(%rcx), %rdx
ffffffff8195df73: 48 8b 32 movq (%rdx), %rsi
ffffffff8195df76: 48 8d 7e 01 leaq 1(%rsi), %rdi
ffffffff8195df7a: 8b 89 fc 15 00 00 movl 5628(%rcx), %ecx
ffffffff8195df80: 48 39 cf cmpq %rcx, %rdi
ffffffff8195df83: 73 08 jae 8
<__sanitizer_cov_trace_pc+0x5d>
ffffffff8195df85: 48 89 3a movq %rdi, (%rdx)
ffffffff8195df88: 48 89 44 f2 08 movq %rax, 8(%rdx,%rsi,8)
ffffffff8195df8d: 2e e9 cd 3d 8b 09 jmp 160120269 <__x86_return_thunk>
ffffffff8195df93: 66 2e 0f 1f 84 00 00 00 00 00 nopw %cs:(%rax,%rax)
ffffffff8195df9d: 0f 1f 00 nopl (%rax)

# After the patch

vmlinux: file format ELF64-x86-64


Disassembly of section .text:

ffffffff8195df30 __sanitizer_cov_trace_pc:
ffffffff8195df30: f3 0f 1e fa endbr64
ffffffff8195df34: 41 57 pushq %r15
ffffffff8195df36: 41 56 pushq %r14
ffffffff8195df38: 41 54 pushq %r12
ffffffff8195df3a: 53 pushq %rbx
ffffffff8195df3b: 48 8b 5c 24 20 movq 32(%rsp), %rbx
ffffffff8195df40: 65 4c 8b 34 25 00 d6 03 00 movq %gs:251392, %r14
ffffffff8195df49: 65 8b 05 b8 f6 6d 7e movl %gs:2121135800(%rip), %eax
ffffffff8195df50: 25 00 01 ff 00 andl $16711936, %eax
ffffffff8195df55: 74 19 je 25
<__sanitizer_cov_trace_pc+0x40>
ffffffff8195df57: 3d 00 01 00 00 cmpl $256, %eax
ffffffff8195df5c: 0f 85 54 01 00 00 jne 340
<__sanitizer_cov_trace_pc+0x186>
ffffffff8195df62: 41 83 be 1c 16 00 00 00 cmpl $0, 5660(%r14)
ffffffff8195df6a: 0f 84 46 01 00 00 je 326
<__sanitizer_cov_trace_pc+0x186>
ffffffff8195df70: 41 8b 86 f8 15 00 00 movl 5624(%r14), %eax
ffffffff8195df77: a9 12 00 00 00 testl $18, %eax
ffffffff8195df7c: 0f 84 34 01 00 00 je 308
<__sanitizer_cov_trace_pc+0x186>
ffffffff8195df82: 41 83 be f8 15 00 00 02 cmpl $2, 5624(%r14)
ffffffff8195df8a: 75 25 jne 37
<__sanitizer_cov_trace_pc+0x81>
ffffffff8195df8c: 49 8b 86 00 16 00 00 movq 5632(%r14), %rax
ffffffff8195df93: 48 8b 08 movq (%rax), %rcx
ffffffff8195df96: 48 ff c1 incq %rcx
ffffffff8195df99: 41 8b 96 fc 15 00 00 movl 5628(%r14), %edx
ffffffff8195dfa0: 48 39 d1 cmpq %rdx, %rcx
ffffffff8195dfa3: 0f 83 0d 01 00 00 jae 269
<__sanitizer_cov_trace_pc+0x186>
ffffffff8195dfa9: 48 89 08 movq %rcx, (%rax)
ffffffff8195dfac: e9 fe 00 00 00 jmp 254
<__sanitizer_cov_trace_pc+0x17f>
ffffffff8195dfb1: 48 89 d8 movq %rbx, %rax
ffffffff8195dfb4: 48 c1 e8 20 shrq $32, %rax
ffffffff8195dfb8: 49 8b 8e 08 16 00 00 movq 5640(%r14), %rcx
ffffffff8195dfbf: 4c 8b 79 58 movq 88(%rcx), %r15
ffffffff8195dfc3: 05 f7 be ad de addl $3735928567, %eax
ffffffff8195dfc8: 8d 93 f7 be ad de leal -559038729(%rbx), %edx
ffffffff8195dfce: 89 c1 movl %eax, %ecx
ffffffff8195dfd0: 81 f1 f7 be ad de xorl $3735928567, %ecx
ffffffff8195dfd6: 89 c6 movl %eax, %esi
ffffffff8195dfd8: c1 c6 0e roll $14, %esi
ffffffff8195dfdb: 29 f1 subl %esi, %ecx
ffffffff8195dfdd: 31 ca xorl %ecx, %edx
ffffffff8195dfdf: 89 ce movl %ecx, %esi
ffffffff8195dfe1: c1 c6 0b roll $11, %esi
ffffffff8195dfe4: 29 f2 subl %esi, %edx
ffffffff8195dfe6: 31 d0 xorl %edx, %eax
ffffffff8195dfe8: 89 d6 movl %edx, %esi
ffffffff8195dfea: c1 c6 19 roll $25, %esi
ffffffff8195dfed: 29 f0 subl %esi, %eax
ffffffff8195dfef: 89 c6 movl %eax, %esi
ffffffff8195dff1: c1 c6 10 roll $16, %esi
ffffffff8195dff4: 31 c1 xorl %eax, %ecx
ffffffff8195dff6: 29 f1 subl %esi, %ecx
ffffffff8195dff8: 31 ca xorl %ecx, %edx
ffffffff8195dffa: 89 ce movl %ecx, %esi
ffffffff8195dffc: c1 c6 04 roll $4, %esi
ffffffff8195dfff: 29 f2 subl %esi, %edx
ffffffff8195e001: 31 d0 xorl %edx, %eax
ffffffff8195e003: c1 c2 0e roll $14, %edx
ffffffff8195e006: 29 d0 subl %edx, %eax
ffffffff8195e008: 89 c2 movl %eax, %edx
ffffffff8195e00a: c1 c2 18 roll $24, %edx
ffffffff8195e00d: 31 c8 xorl %ecx, %eax
ffffffff8195e00f: 29 d0 subl %edx, %eax
ffffffff8195e011: 44 69 e0 47 86 c8 61 imull $1640531527, %eax, %r12d
ffffffff8195e018: 41 c1 ec 11 shrl $17, %r12d
ffffffff8195e01c: 4b 8b 04 e7 movq (%r15,%r12,8), %rax
ffffffff8195e020: 48 85 c0 testq %rax, %rax
ffffffff8195e023: 74 18 je 24
<__sanitizer_cov_trace_pc+0x10d>
ffffffff8195e025: 48 83 c0 f8 addq $-8, %rax
ffffffff8195e029: 74 12 je 18
<__sanitizer_cov_trace_pc+0x10d>
ffffffff8195e02b: 48 39 18 cmpq %rbx, (%rax)
ffffffff8195e02e: 0f 84 82 00 00 00 je 130
<__sanitizer_cov_trace_pc+0x186>
ffffffff8195e034: 48 8b 40 08 movq 8(%rax), %rax
ffffffff8195e038: 48 85 c0 testq %rax, %rax
ffffffff8195e03b: 75 e8 jne -24
<__sanitizer_cov_trace_pc+0xf5>
ffffffff8195e03d: 49 8b bf 00 00 04 00 movq 262144(%r15), %rdi
ffffffff8195e044: 48 8b 57 58 movq 88(%rdi), %rdx
ffffffff8195e048: 48 8b 4f 60 movq 96(%rdi), %rcx
ffffffff8195e04c: be 20 00 00 00 movl $32, %esi
ffffffff8195e051: 45 31 c0 xorl %r8d, %r8d
ffffffff8195e054: e8 47 b4 f0 02 callq 49329223
<gen_pool_alloc_algo_owner>
ffffffff8195e059: 48 85 c0 testq %rax, %rax
ffffffff8195e05c: 74 58 je 88
<__sanitizer_cov_trace_pc+0x186>
ffffffff8195e05e: 4b 8d 14 e7 leaq (%r15,%r12,8), %rdx
ffffffff8195e062: 48 89 c6 movq %rax, %rsi
ffffffff8195e065: 48 89 18 movq %rbx, (%rax)
ffffffff8195e068: 48 83 c0 08 addq $8, %rax
ffffffff8195e06c: 48 c7 46 08 00 00 00 00 movq $0, 8(%rsi)
ffffffff8195e074: 48 c7 46 10 00 00 00 00 movq $0, 16(%rsi)
ffffffff8195e07c: 48 8b 0a movq (%rdx), %rcx
ffffffff8195e07f: 48 89 4e 08 movq %rcx, 8(%rsi)
ffffffff8195e083: 48 89 56 10 movq %rdx, 16(%rsi)
ffffffff8195e087: 48 89 02 movq %rax, (%rdx)
ffffffff8195e08a: 48 85 c9 testq %rcx, %rcx
ffffffff8195e08d: 74 04 je 4
<__sanitizer_cov_trace_pc+0x163>
ffffffff8195e08f: 48 89 41 08 movq %rax, 8(%rcx)
ffffffff8195e093: 49 8b 86 00 16 00 00 movq 5632(%r14), %rax
ffffffff8195e09a: 48 8b 08 movq (%rax), %rcx
ffffffff8195e09d: 48 ff c1 incq %rcx
ffffffff8195e0a0: 41 8b 96 fc 15 00 00 movl 5628(%r14), %edx
ffffffff8195e0a7: 48 39 d1 cmpq %rdx, %rcx
ffffffff8195e0aa: 73 0a jae 10
<__sanitizer_cov_trace_pc+0x186>
ffffffff8195e0ac: 48 89 08 movq %rcx, (%rax)
ffffffff8195e0af: 48 8d 04 c8 leaq (%rax,%rcx,8), %rax
ffffffff8195e0b3: 48 89 18 movq %rbx, (%rax)
ffffffff8195e0b6: 5b popq %rbx
ffffffff8195e0b7: 41 5c popq %r12
ffffffff8195e0b9: 41 5e popq %r14
ffffffff8195e0bb: 41 5f popq %r15
ffffffff8195e0bd: 2e e9 9d 3c 8b 09 jmp 160119965 <__x86_return_thunk>
ffffffff8195e0c3: 66 2e 0f 1f 84 00 00 00 00 00 nopw %cs:(%rax,%rax)
ffffffff8195e0cd: 0f 1f 00 nopl (%rax)


So a call frame to gen_pool_alloc_algo_owner() has been added, and KCOV
instrumentation needs to be disabled for it.
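
The patch already does that at the object level via lib/Makefile
(KCOV_INSTRUMENT_genalloc.o := n). An alternative sketch, assuming the
attribute is honored on these functions, would be annotating the hot
entry point in lib/genalloc.c directly:

    /* Sketch only: keep KCOV's hot path from recursing into
     * instrumented allocator code. */
    unsigned long __no_sanitize_coverage
    gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
                              genpool_algo_t algo, void *data,
                              void **owner)
    {
        /* ... existing implementation ... */
    }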

>
> Also, I have concerns about interrupts and reentrancy. We are still
> getting some reentrant calls from interrupts (not all of them are
> filtered by the in_task() check). I am afraid these complex hashmaps
> will get corrupted.
This needs more investigation, and advice on a better way to store the
unique info is welcome.