Re: [PATCH v3 01/17] slab: Reimplement page_slab()


Vlastimil Babka

Oct 27, 2025, 11:34:54 AM
to Matthew Wilcox (Oracle), Andrew Morton, Alexander Potapenko, Marco Elver, Christoph Lameter, David Rientjes, Roman Gushchin, Harry Yoo, linu...@kvack.org, Dmitry Vyukov, kasan-dev
On 10/24/25 22:44, Matthew Wilcox (Oracle) wrote:
> In order to separate slabs from folios, we need to convert from any page
> in a slab to the slab directly, without going through a page-to-folio
> conversion first. page_slab() is a little different from other memdesc
> converters we have in that it will return NULL if the page is not part
> of a slab. This will be the normal style for memdesc converters in
> the future.
>
> kfence was the only user of page_slab(), so adjust it to the new way
> of working. It will need to be touched again when we separate slab
> from page.
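
As I read the new contract, a caller can now test slab membership and
obtain the slab in a single step; a minimal usage sketch (mine, not
part of the patch):

	struct slab *slab = page_slab(page);

	if (!slab)
		return;		/* not a slab page, e.g. large kmalloc */
	/* from here on, "slab" is known to be valid */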

+Cc KFENCE folks.

> Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>

Otherwise LGTM.

> ---
>  include/linux/page-flags.h | 14 +-------------
>  mm/kfence/core.c           | 12 ++++++++----
>  mm/slab.h                  | 28 ++++++++++++++++------------
>  3 files changed, 25 insertions(+), 29 deletions(-)
>
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index 0091ad1986bf..6d5e44968eab 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -1048,19 +1048,7 @@ PAGE_TYPE_OPS(Table, table, pgtable)
>   */
>  PAGE_TYPE_OPS(Guard, guard, guard)
>
> -FOLIO_TYPE_OPS(slab, slab)
> -
> -/**
> - * PageSlab - Determine if the page belongs to the slab allocator
> - * @page: The page to test.
> - *
> - * Context: Any context.
> - * Return: True for slab pages, false for any other kind of page.
> - */
> -static inline bool PageSlab(const struct page *page)
> -{
> -	return folio_test_slab(page_folio(page));
> -}
> +PAGE_TYPE_OPS(Slab, slab, slab)
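
For context: PAGE_TYPE_OPS() generates the PageSlab(), __SetPageSlab()
and __ClearPageSlab() helpers used in the kfence hunks below from the
page type bits. Conceptually the test boils down to something like this
(simplified sketch, mirroring the check page_slab() does in mm/slab.h):

	static inline bool PageSlab(const struct page *page)
	{
		/* the top byte of page_type carries the PGTY_* tag */
		return data_race(page->page_type >> 24) == PGTY_slab;
	}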
>
>  #ifdef CONFIG_HUGETLB_PAGE
>  FOLIO_TYPE_OPS(hugetlb, hugetlb)
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 727c20c94ac5..b16e73fd5b68 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -612,13 +612,15 @@ static unsigned long kfence_init_pool(void)
>  	 * enters __slab_free() slow-path.
>  	 */
>  	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> +		struct page *page;
>  		struct slab *slab;
> 
>  		if (!i || (i % 2))
>  			continue;
> 
> -		slab = page_slab(pfn_to_page(start_pfn + i));
> -		__folio_set_slab(slab_folio(slab));
> +		page = pfn_to_page(start_pfn + i);
> +		__SetPageSlab(page);
> +		slab = page_slab(page);
>  #ifdef CONFIG_MEMCG
>  		slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
>  				 MEMCG_DATA_OBJEXTS;
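
Worth noting: the ordering here is now load-bearing. __SetPageSlab()
must come before page_slab(), because page_slab() returns NULL until
the page type is set; the opposite order, i.e.

	slab = page_slab(page);		/* NULL: type not set yet */
	__SetPageSlab(page);

would make the slab->obj_exts store below a NULL dereference. The
reset_slab path mirrors this by clearing the type only after the last
use of the slab pointer.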
> @@ -665,16 +667,18 @@ static unsigned long kfence_init_pool(void)
>
>  reset_slab:
>  	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> +		struct page *page;
>  		struct slab *slab;
> 
>  		if (!i || (i % 2))
>  			continue;
> 
> -		slab = page_slab(pfn_to_page(start_pfn + i));
> +		page = pfn_to_page(start_pfn + i);
> +		slab = page_slab(page);
>  #ifdef CONFIG_MEMCG
>  		slab->obj_exts = 0;
>  #endif
> -		__folio_clear_slab(slab_folio(slab));
> +		__ClearPageSlab(page);
>  	}
> 
>  	return addr;
> diff --git a/mm/slab.h b/mm/slab.h
> index 078daecc7cf5..a64b9b2c8731 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -146,20 +146,24 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
>  	struct slab *:		(struct folio *)s))
> 
>  /**
> - * page_slab - Converts from first struct page to slab.
> - * @p: The first (either head of compound or single) page of slab.
> + * page_slab - Converts from struct page to its slab.
> + * @page: A page which may or may not belong to a slab.
>   *
> - * A temporary wrapper to convert struct page to struct slab in situations where
> - * we know the page is the compound head, or single order-0 page.
> - *
> - * Long-term ideally everything would work with struct slab directly or go
> - * through folio to struct slab.
> - *
> - * Return: The slab which contains this page
> + * Return: The slab which contains this page, or NULL if the page does
> + * not belong to a slab (this includes pages returned from large kmalloc).
>   */
> -#define page_slab(p)		(_Generic((p),				\
> -	const struct page *:	(const struct slab *)(p),		\
> -	struct page *:		(struct slab *)(p)))
> +static inline struct slab *page_slab(const struct page *page)
> +{
> +	unsigned long head;
> +
> +	head = READ_ONCE(page->compound_head);
> +	if (head & 1)
> +		page = (struct page *)(head - 1);
> +	if (data_race(page->page_type >> 24) != PGTY_slab)
> +		page = NULL;
> +
> +	return (struct slab *)page;
> +}
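
The head-page decoding above matches compound_head(): bit 0 of
page->compound_head is set on tail pages and the remaining bits point
at the head page, so the type is always tested on the head. Modulo the
READ_ONCE()/data_race() annotations, it is equivalent to this sketch
built on existing helpers:

	const struct page *head = compound_head(page);

	return PageSlab(head) ? (struct slab *)head : NULL;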
>
>  /**
>   * slab_page - The first struct page allocated for a slab
