[PATCH v2 1/3] s390: Always declare __mem functions

2 views
Skip to first unread message

Marco Elver

unread,
Sep 9, 2022, 3:38:53 AM
to el...@google.com, Paul E. McKenney, Mark Rutland, Dmitry Vyukov, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Heiko Carstens, Vasily Gorbik, Alexander Gordeev, Christian Borntraeger, Sven Schnelle, Peter Zijlstra, linux...@vger.kernel.org
Like other architectures, always declare __mem*() functions if the
architecture defines __HAVE_ARCH_MEM*.

For example, this is required by sanitizer runtimes to unambiguously
refer to the arch versions of the mem-functions, and the compiler not
attempting any "optimizations" such as replacing the calls with builtins
(which may later be inlined etc.).

Signed-off-by: Marco Elver <el...@google.com>
---
v2:
* New patch.
---
arch/s390/include/asm/string.h | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 3fae93ddb322..2c3c48d526b9 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -20,8 +20,11 @@
#define __HAVE_ARCH_MEMSET64 /* arch function */

void *memcpy(void *dest, const void *src, size_t n);
+void *__memcpy(void *dest, const void *src, size_t n);
void *memset(void *s, int c, size_t n);
+void *__memset(void *s, int c, size_t n);
void *memmove(void *dest, const void *src, size_t n);
+void *__memmove(void *dest, const void *src, size_t n);

#ifndef CONFIG_KASAN
#define __HAVE_ARCH_MEMCHR /* inline & arch function */
@@ -55,10 +58,6 @@ char *strstr(const char *s1, const char *s2);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

-extern void *__memcpy(void *dest, const void *src, size_t n);
-extern void *__memset(void *s, int c, size_t n);
-extern void *__memmove(void *dest, const void *src, size_t n);
-
/*
* For files that are not instrumented (e.g. mm/slub.c) we
* should use not instrumented version of mem* functions.
--
2.37.2.789.g6183377224-goog

Marco Elver

unread,
Sep 9, 2022, 3:38:56 AM
to el...@google.com, Paul E. McKenney, Mark Rutland, Dmitry Vyukov, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Heiko Carstens, Vasily Gorbik, Alexander Gordeev, Christian Borntraeger, Sven Schnelle, Peter Zijlstra, linux...@vger.kernel.org, sta...@vger.kernel.org
With Clang version 16+, -fsanitize=thread will turn
memcpy/memset/memmove calls in instrumented functions into
__tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.

Add these functions to the core KCSAN runtime, so that we (a) catch data
races with mem* functions, and (b) won't run into linker errors with
such newer compilers.

Cc: sta...@vger.kernel.org # v5.10+
Signed-off-by: Marco Elver <el...@google.com>
---
v2:
* Fix for architectures which do not provide their own
memcpy/memset/memmove and instead use the generic versions in
lib/string. In this case we'll just alias the __tsan_ variants.
---
kernel/kcsan/core.c | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index fe12dfe254ec..4015f2a3e7f6 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -18,6 +18,7 @@
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
+#include <linux/string.h>
#include <linux/uaccess.h>

#include "encoding.h"
@@ -1308,3 +1309,41 @@ noinline void __tsan_atomic_signal_fence(int memorder)
}
}
EXPORT_SYMBOL(__tsan_atomic_signal_fence);
+
+#ifdef __HAVE_ARCH_MEMSET
+void *__tsan_memset(void *s, int c, size_t count);
+noinline void *__tsan_memset(void *s, int c, size_t count)
+{
+ check_access(s, count, KCSAN_ACCESS_WRITE, _RET_IP_);
+ return __memset(s, c, count);
+}
+#else
+void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
+#endif
+EXPORT_SYMBOL(__tsan_memset);
+
+#ifdef __HAVE_ARCH_MEMMOVE
+void *__tsan_memmove(void *dst, const void *src, size_t len);
+noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
+{
+ check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
+ check_access(src, len, 0, _RET_IP_);
+ return __memmove(dst, src, len);
+}
+#else
+void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove);
+#endif
+EXPORT_SYMBOL(__tsan_memmove);
+
+#ifdef __HAVE_ARCH_MEMCPY
+void *__tsan_memcpy(void *dst, const void *src, size_t len);
+noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
+{
+ check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
+ check_access(src, len, 0, _RET_IP_);
+ return __memcpy(dst, src, len);
+}
+#else
+void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy);
+#endif
+EXPORT_SYMBOL(__tsan_memcpy);
--
2.37.2.789.g6183377224-goog

Marco Elver

unread,
Sep 9, 2022, 3:38:58 AM
to el...@google.com, Paul E. McKenney, Mark Rutland, Dmitry Vyukov, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Heiko Carstens, Vasily Gorbik, Alexander Gordeev, Christian Borntraeger, Sven Schnelle, Peter Zijlstra, linux...@vger.kernel.org
Adds KCSAN's volatile instrumentation to objtool's uaccess whitelist.

Recent kernel changes have shown that this was missing from the uaccess
whitelist (since the first upstreamed version of KCSAN):

mm/gup.o: warning: objtool: fault_in_readable+0x101: call to __tsan_volatile_write1() with UACCESS enabled

Fixes: 75d75b7a4d54 ("kcsan: Support distinguishing volatile accesses")
Signed-off-by: Marco Elver <el...@google.com>
---
v2:
* Fix commit message.
---
tools/objtool/check.c | 10 ++++++++++
1 file changed, 10 insertions(+)

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index e55fdf952a3a..67afdce3421f 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -999,6 +999,16 @@ static const char *uaccess_safe_builtin[] = {
"__tsan_read_write4",
"__tsan_read_write8",
"__tsan_read_write16",
+ "__tsan_volatile_read1",
+ "__tsan_volatile_read2",
+ "__tsan_volatile_read4",
+ "__tsan_volatile_read8",
+ "__tsan_volatile_read16",
+ "__tsan_volatile_write1",
+ "__tsan_volatile_write2",
+ "__tsan_volatile_write4",
+ "__tsan_volatile_write8",
+ "__tsan_volatile_write16",
"__tsan_atomic8_load",
"__tsan_atomic16_load",
"__tsan_atomic32_load",
--
2.37.2.789.g6183377224-goog

Dmitry Vyukov

unread,
Sep 9, 2022, 4:36:16 AM
to Marco Elver, Paul E. McKenney, Mark Rutland, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Heiko Carstens, Vasily Gorbik, Alexander Gordeev, Christian Borntraeger, Sven Schnelle, Peter Zijlstra, linux...@vger.kernel.org
On Fri, 9 Sept 2022 at 09:38, Marco Elver <el...@google.com> wrote:
>
> Like other architectures, always declare __mem*() functions if the
> architecture defines __HAVE_ARCH_MEM*.
>
> For example, this is required by sanitizer runtimes to unambiguously
> refer to the arch versions of the mem-functions, and the compiler not
> attempting any "optimizations" such as replacing the calls with builtins
> (which may later be inlined etc.).
>
> Signed-off-by: Marco Elver <el...@google.com>

Acked-by: Dmitry Vyukov <dvy...@google.com>

Dmitry Vyukov

unread,
Sep 9, 2022, 4:38:18 AM
to Marco Elver, Paul E. McKenney, Mark Rutland, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Heiko Carstens, Vasily Gorbik, Alexander Gordeev, Christian Borntraeger, Sven Schnelle, Peter Zijlstra, linux...@vger.kernel.org, sta...@vger.kernel.org
On Fri, 9 Sept 2022 at 09:38, Marco Elver <el...@google.com> wrote:
>
These can use large sizes, does it make sense to truncate it to
MAX_ENCODABLE_SIZE?

Dmitry Vyukov

unread,
Sep 9, 2022, 4:38:57 AM
to Marco Elver, Paul E. McKenney, Mark Rutland, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Heiko Carstens, Vasily Gorbik, Alexander Gordeev, Christian Borntraeger, Sven Schnelle, Peter Zijlstra, linux...@vger.kernel.org
On Fri, 9 Sept 2022 at 09:38, Marco Elver <el...@google.com> wrote:
>
> Adds KCSAN's volatile instrumentation to objtool's uaccess whitelist.
>
> Recent kernel changes have shown that this was missing from the uaccess
> whitelist (since the first upstreamed version of KCSAN):
>
> mm/gup.o: warning: objtool: fault_in_readable+0x101: call to __tsan_volatile_write1() with UACCESS enabled
>
> Fixes: 75d75b7a4d54 ("kcsan: Support distinguishing volatile accesses")
> Signed-off-by: Marco Elver <el...@google.com>

Reviewed-by: Dmitry Vyukov <dvy...@google.com>

Marco Elver

unread,
Sep 9, 2022, 4:44:32 AM
to Dmitry Vyukov, Paul E. McKenney, Mark Rutland, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Heiko Carstens, Vasily Gorbik, Alexander Gordeev, Christian Borntraeger, Sven Schnelle, Peter Zijlstra, linux...@vger.kernel.org, sta...@vger.kernel.org
Hmm, good point - that way it can still set up watchpoints on them.
I'll do a v3.

Marco Elver

unread,
Sep 12, 2022, 5:45:53 AM
to el...@google.com, Paul E. McKenney, Mark Rutland, Dmitry Vyukov, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Josh Poimboeuf, Peter Zijlstra, sta...@vger.kernel.org
With Clang version 16+, -fsanitize=thread will turn
memcpy/memset/memmove calls in instrumented functions into
__tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.

Add these functions to the core KCSAN runtime, so that we (a) catch data
races with mem* functions, and (b) won't run into linker errors with
such newer compilers.

Cc: sta...@vger.kernel.org # v5.10+
Signed-off-by: Marco Elver <el...@google.com>
---
v3:
* Truncate sizes larger than MAX_ENCODABLE_SIZE, so we still set up
watchpoints on them. Iterating through MAX_ENCODABLE_SIZE blocks may
result in pathological cases where performance would seriously suffer.
So let's avoid that for now.
* Just use memcpy/memset/memmove instead of __mem*() functions. Many
architectures that already support KCSAN don't define them (mips,
s390), and having both __mem* and mem versions of the functions
provides little benefit elsewhere; and backporting would become more
difficult, too. The compiler should not inline them given all
parameters are non-constants here.

v2:
* Fix for architectures which do not provide their own
memcpy/memset/memmove and instead use the generic versions in
lib/string. In this case we'll just alias the __tsan_ variants.
---
kernel/kcsan/core.c | 50 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 50 insertions(+)

diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index fe12dfe254ec..54d077e1a2dc 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -14,10 +14,12 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
+#include <linux/string.h>
#include <linux/uaccess.h>

#include "encoding.h"
@@ -1308,3 +1310,51 @@ noinline void __tsan_atomic_signal_fence(int memorder)
}
}
EXPORT_SYMBOL(__tsan_atomic_signal_fence);
+
+#ifdef __HAVE_ARCH_MEMSET
+void *__tsan_memset(void *s, int c, size_t count);
+noinline void *__tsan_memset(void *s, int c, size_t count)
+{
+ /*
+ * Instead of not setting up watchpoints where accessed size is greater
+ * than MAX_ENCODABLE_SIZE, truncate checked size to MAX_ENCODABLE_SIZE.
+ */
+ size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE);
+
+ check_access(s, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
+ return memset(s, c, count);
+}
+#else
+void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
+#endif
+EXPORT_SYMBOL(__tsan_memset);
+
+#ifdef __HAVE_ARCH_MEMMOVE
+void *__tsan_memmove(void *dst, const void *src, size_t len);
+noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
+{
+ size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
+
+ check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
+ check_access(src, check_len, 0, _RET_IP_);
+ return memmove(dst, src, len);
+}
+#else
+void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove);
+#endif
+EXPORT_SYMBOL(__tsan_memmove);
+
+#ifdef __HAVE_ARCH_MEMCPY
+void *__tsan_memcpy(void *dst, const void *src, size_t len);
+noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
+{
+ size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);
+
+ check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
+ check_access(src, check_len, 0, _RET_IP_);
+ return memcpy(dst, src, len);

Marco Elver

unread,
Sep 12, 2022, 5:45:55 AM
to el...@google.com, Paul E. McKenney, Mark Rutland, Dmitry Vyukov, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Josh Poimboeuf, Peter Zijlstra

Paul E. McKenney

unread,
Sep 14, 2022, 8:12:23 AM
to Marco Elver, Mark Rutland, Dmitry Vyukov, Alexander Potapenko, Boqun Feng, kasa...@googlegroups.com, linux-...@vger.kernel.org, Nathan Chancellor, Nick Desaulniers, ll...@lists.linux.dev, Josh Poimboeuf, Peter Zijlstra, sta...@vger.kernel.org
On Mon, Sep 12, 2022 at 11:45:40AM +0200, Marco Elver wrote:
> With Clang version 16+, -fsanitize=thread will turn
> memcpy/memset/memmove calls in instrumented functions into
> __tsan_memcpy/__tsan_memset/__tsan_memmove calls respectively.
>
> Add these functions to the core KCSAN runtime, so that we (a) catch data
> races with mem* functions, and (b) won't run into linker errors with
> such newer compilers.
>
> Cc: sta...@vger.kernel.org # v5.10+
> Signed-off-by: Marco Elver <el...@google.com>

Queued and pushed, thank you!

Thanx, Paul
Reply all
Reply to author
Forward
0 new messages