
[PATCH 1/3] kernel: use lockless list for smp_call_function_single()


Jan Kara
Dec 18, 2013, 10:30:02 AM
From: Christoph Hellwig <h...@lst.de>

Make smp_call_function_single and friends more efficient by using
a lockless list.

Signed-off-by: Christoph Hellwig <h...@lst.de>
Signed-off-by: Jan Kara <ja...@suse.cz>
---
include/linux/elevator.h | 2 +-
include/linux/smp.h | 7 ++++++-
kernel/smp.c | 51 ++++++++++--------------------------------------
3 files changed, 17 insertions(+), 43 deletions(-)

diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 306dd8cd0b6f..ec0e8f04c03c 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -211,7 +211,7 @@ enum {
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) do { \
list_del_init(&(rq)->queuelist); \
- INIT_LIST_HEAD(&(rq)->csd.list); \
+ (rq)->csd.llist.next = NULL; \
} while (0)

#else /* CONFIG_BLOCK */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5da22ee42e16..c0f38018c195 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,12 +11,17 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
+#include <linux/llist.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
- struct list_head list;
+ /* We still need list_head because of abuse by a block layer */
+ union {
+ struct list_head list;
+ struct llist_node llist;
+ };
smp_call_func_t func;
void *info;
u16 flags;
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f94028838..47b415e16c24 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -28,12 +28,7 @@ struct call_function_data {

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

-struct call_single_queue {
- struct list_head list;
- raw_spinlock_t lock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -85,12 +80,8 @@ void __init call_function_init(void)
void *cpu = (void *)(long)smp_processor_id();
int i;

- for_each_possible_cpu(i) {
- struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
- raw_spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->list);
- }
+ for_each_possible_cpu(i)
+ init_llist_head(&per_cpu(call_single_queue, i));

hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +132,9 @@ static void csd_unlock(struct call_single_data *csd)
*/
static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
{
- struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
- unsigned long flags;
- int ipi;
-
if (wait)
csd->flags |= CSD_FLAG_WAIT;

- raw_spin_lock_irqsave(&dst->lock, flags);
- ipi = list_empty(&dst->list);
- list_add_tail(&csd->list, &dst->list);
- raw_spin_unlock_irqrestore(&dst->lock, flags);
-
/*
* The list addition should be visible before sending the IPI
* handler locks the list to pull the entry off it because of
@@ -164,7 +146,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
* locking and barrier primitives. Generic code isn't really
* equipped to do the right thing...
*/
- if (ipi)
+ if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
arch_send_call_function_single_ipi(cpu);

if (wait)
@@ -177,26 +159,19 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
*/
void generic_smp_call_function_single_interrupt(void)
{
- struct call_single_queue *q = &__get_cpu_var(call_single_queue);
- LIST_HEAD(list);
+ struct llist_node *entry;
+ struct call_single_data *csd, *csd_next;

/*
* Shouldn't receive this interrupt on a cpu that is not yet online.
*/
WARN_ON_ONCE(!cpu_online(smp_processor_id()));

- raw_spin_lock(&q->lock);
- list_replace_init(&q->list, &list);
- raw_spin_unlock(&q->lock);
-
- while (!list_empty(&list)) {
- struct call_single_data *csd;
-
- csd = list_entry(list.next, struct call_single_data, list);
- list_del(&csd->list);
+ entry = llist_del_all(&__get_cpu_var(call_single_queue));
+ entry = llist_reverse_order(entry);

+ llist_for_each_entry_safe(csd, csd_next, entry, llist) {
csd->func(csd->info);
-
csd_unlock(csd);
}
}
@@ -411,17 +386,11 @@ void smp_call_function_many(const struct cpumask *mask,

for_each_cpu(cpu, cfd->cpumask) {
struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
- struct call_single_queue *dst =
- &per_cpu(call_single_queue, cpu);
- unsigned long flags;

csd_lock(csd);
csd->func = func;
csd->info = info;
-
- raw_spin_lock_irqsave(&dst->lock, flags);
- list_add_tail(&csd->list, &dst->list);
- raw_spin_unlock_irqrestore(&dst->lock, flags);
+ llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
}

/* Send a message to all CPUs in the map */
--
1.8.1.4
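
For reference, below is a stand-alone user-space sketch of the pattern the
patch relies on. The names and the C11 atomics are hypothetical stand-ins
for the kernel's llist.h helpers, not code from the patch: the push that
finds the queue empty is the one that has to kick the consumer (the IPI
decision above), and the consumer detaches the whole LIFO chain in one shot
and reverses it so entries are processed in submission order.

/* Lock-free stack, a user-space analogue of llist. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int value;
};

struct stack {
	_Atomic(struct node *) head;
};

/* Returns true if the stack was empty before this push (send the "IPI"). */
static bool stack_push(struct stack *s, struct node *n)
{
	struct node *old = atomic_load_explicit(&s->head, memory_order_relaxed);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&s->head, &old, n,
							memory_order_release,
							memory_order_relaxed));
	return old == NULL;
}

/* Detach the whole chain at once; it comes back in LIFO order. */
static struct node *stack_take_all(struct stack *s)
{
	return atomic_exchange_explicit(&s->head, NULL, memory_order_acquire);
}

/* Reverse a detached chain so entries run in submission order. */
static struct node *reverse(struct node *n)
{
	struct node *rev = NULL;

	while (n) {
		struct node *next = n->next;

		n->next = rev;
		rev = n;
		n = next;
	}
	return rev;
}

int main(void)
{
	struct stack q;
	struct node nodes[3] = { { .value = 1 }, { .value = 2 }, { .value = 3 } };

	atomic_init(&q.head, NULL);

	for (int i = 0; i < 3; i++) {
		if (stack_push(&q, &nodes[i]))
			printf("push of %d found the queue empty: kick the consumer\n",
			       nodes[i].value);
	}

	for (struct node *n = reverse(stack_take_all(&q)); n; n = n->next)
		printf("processing %d\n", n->value);

	return 0;
}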

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majo...@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/

Jan Kara
Dec 18, 2013, 10:30:02 AM
Abusing rq->csd.list for a list of requests to complete is rather ugly.
Especially since using queuelist should be safe and much cleaner.

Signed-off-by: Jan Kara <ja...@suse.cz>
---
block/blk-softirq.c | 12 ++++++------
include/linux/smp.h | 6 +-----
2 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 57790c1a97eb..7ea5534096d5 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
while (!list_empty(&local_list)) {
struct request *rq;

- rq = list_entry(local_list.next, struct request, csd.list);
- list_del_init(&rq->csd.list);
+ rq = list_entry(local_list.next, struct request, queuelist);
+ list_del_init(&rq->queuelist);
rq->q->softirq_done_fn(rq);
}
}
@@ -45,9 +45,9 @@ static void trigger_softirq(void *data)

local_irq_save(flags);
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&rq->csd.list, list);
+ list_add_tail(&rq->queuelist, list);

- if (list->next == &rq->csd.list)
+ if (list->next == &rq->queuelist)
raise_softirq_irqoff(BLOCK_SOFTIRQ);

local_irq_restore(flags);
@@ -136,7 +136,7 @@ void __blk_complete_request(struct request *req)
struct list_head *list;
do_local:
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&req->csd.list, list);
+ list_add_tail(&req->queuelist, list);

/*
* if the list only contains our just added request,
@@ -144,7 +144,7 @@ do_local:
* entries there, someone already raised the irq but it
* hasn't run yet.
*/
- if (list->next == &req->csd.list)
+ if (list->next == &req->queuelist)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
} else if (raise_blk_irq(ccpu, req))
goto do_local;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c0f38018c195..9a1b8ba05924 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -17,11 +17,7 @@ extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
- /* We still need list_head because of abuse by a block layer */
- union {
- struct list_head list;
- struct llist_node llist;
- };
+ struct llist_node llist;
smp_call_func_t func;
void *info;
u16 flags;
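
A stand-alone sketch of the blk-softirq.c idiom above (hypothetical names
and a minimal circular list instead of the kernel's list.h, so an analogue
rather than the patch's code): after list_add_tail(), the head pointing at
the new entry means the list was empty beforehand, so this completion is
the one that must raise the softirq. In the kernel the per-CPU list is
protected by disabling interrupts, which is what makes the unlocked check
safe.

#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

/* Insert n just before head h, i.e. at the tail of the list. */
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct fake_request {
	struct list_head queuelist;
	int id;
};

/* Returns true if this request was the first one queued on this "CPU". */
static bool complete_request(struct list_head *cpu_done, struct fake_request *rq)
{
	list_add_tail(&rq->queuelist, cpu_done);
	return cpu_done->next == &rq->queuelist;
}

int main(void)
{
	struct list_head blk_cpu_done;
	struct fake_request a = { .id = 1 }, b = { .id = 2 };

	list_init(&blk_cpu_done);

	printf("request %d: %s\n", a.id,
	       complete_request(&blk_cpu_done, &a) ?
	       "raise softirq" : "softirq already pending");
	printf("request %d: %s\n", b.id,
	       complete_request(&blk_cpu_done, &b) ?
	       "raise softirq" : "softirq already pending");
	return 0;
}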

Jan Kara
Dec 18, 2013, 10:30:02 AM
Hello,

these three patches make smp_call_function_single() use lockless lists and
cleanup abuse of csd.list in the block layer. I have some use for lockless
smp_call_function_single() in printk code and that's why I've picked up
Christoph's patch. Jens, can you have a look at the series please?

Honza

Jan Kara
Dec 18, 2013, 10:30:03 AM
Block layer currently abuses rq->csd.list.next for storing fifo_time.
That is a terrible hack and completely unnecessary as well. Union
achieves the same space saving in a cleaner way.

Signed-off-by: Jan Kara <ja...@suse.cz>
---
block/cfq-iosched.c | 8 ++++----
block/deadline-iosched.c | 8 ++++----
include/linux/blkdev.h | 1 +
include/linux/elevator.h | 6 ------
4 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4d5cec1ad80d..37ea6045fdc6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2382,10 +2382,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+ time_before(next->fifo_time, rq->fifo_time) &&
cfqq == RQ_CFQQ(next)) {
list_move(&rq->queuelist, &next->queuelist);
- rq_set_fifo_time(rq, rq_fifo_time(next));
+ rq->fifo_time = next->fifo_time;
}

if (cfqq->next_rq == next)
@@ -2829,7 +2829,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
return NULL;

rq = rq_entry_fifo(cfqq->fifo.next);
- if (time_before(jiffies, rq_fifo_time(rq)))
+ if (time_before(jiffies, rq->fifo_time))
rq = NULL;

cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
@@ -3942,7 +3942,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, cfqq, "insert_request");
cfq_init_prio_data(cfqq, RQ_CIC(rq));

- rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
+ rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 9ef66406c625..a753df2b3fc2 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
/*
* set expire time and add to fifo list
*/
- rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+ rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}

@@ -174,9 +174,9 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
* and move into next position (next will be deleted) in fifo
*/
if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ if (time_before(next->fifo_time, req->fifo_time)) {
list_move(&req->queuelist, &next->queuelist);
- rq_set_fifo_time(req, rq_fifo_time(next));
+ req->fifo_time = next->fifo_time;
}
}

@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
/*
* rq is expired!
*/
- if (time_after_eq(jiffies, rq_fifo_time(rq)))
+ if (time_after_eq(jiffies, rq->fifo_time))
return 1;

return 0;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1b135d49b279..744b5fb13a9b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -102,6 +102,7 @@ struct request {
union {
struct call_single_data csd;
struct work_struct mq_flush_data;
+ unsigned long fifo_time;
};

struct request_queue *q;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ec0e8f04c03c..06860e2a25c3 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -202,12 +202,6 @@ enum {
#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)

-/*
- * Hack to reuse the csd.list list_head as the fifo time holder while
- * the request is in the io scheduler. Saves an unsigned long in rq.
- */
-#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
-#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) do { \
list_del_init(&(rq)->queuelist); \
--
1.8.1.4
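
A simplified illustration of the union trick (hypothetical structs, not the
real struct request or struct call_single_data): fifo_time only matters
while the request sits in an io scheduler fifo and csd only matters while a
completion is pending, so the two can share storage and adding fifo_time to
the union costs no extra space.

#include <stdio.h>

struct fake_csd {			/* stand-in for struct call_single_data */
	void *llist_next;
	void (*func)(void *);
	void *info;
	unsigned short flags;
};

struct request_without_fifo_time {
	struct fake_csd csd;
	/* pre-patch code hid fifo_time inside csd's list pointer instead */
};

struct request_with_union {		/* the layout this patch introduces, simplified */
	union {
		struct fake_csd csd;
		unsigned long fifo_time;
	};
};

int main(void)
{
	/* Both sizes come out the same: the extra union member is free. */
	printf("without fifo_time: %zu bytes\n",
	       sizeof(struct request_without_fifo_time));
	printf("with fifo_time in a union: %zu bytes\n",
	       sizeof(struct request_with_union));
	return 0;
}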

Christoph Hellwig
Dec 18, 2013, 10:40:03 AM
On Wed, Dec 18, 2013 at 04:24:59PM +0100, Jan Kara wrote:
> Hello,
>
> these three patches make smp_call_function_single() use lockless lists and
> cleanup abuse of csd.list in the block layer. I have some use for lockless
> smp_call_function_single() in printk code and that's why I've picked up
> Christoph's patch. Jens, can you have a look at the series please?

seconded. Also I think your two cleanups should go before my patch.
I'll also dust up my other patch to use __smp_call_function_single
directly in the block layer once we make progress on these.

Jan Kara
Dec 18, 2013, 11:50:01 AM
On Wed 18-12-13 07:36:44, Christoph Hellwig wrote:
> On Wed, Dec 18, 2013 at 04:24:59PM +0100, Jan Kara wrote:
> > Hello,
> >
> > these three patches make smp_call_function_single() use lockless lists and
> > cleanup abuse of csd.list in the block layer. I have some use for lockless
> > smp_call_function_single() in printk code and that's why I've picked up
> > Christoph's patch. Jens, can you have a look at the series please?
>
> seconded. Also I think your two cleanups should go before my patch.
Fair enough, I'll reorder the patches in the series and resend.

> I'll also dust up my other patch to use __smp_call_function_single
> directly in the block layer once we make progress on these.

Honza
--
Jan Kara <ja...@suse.cz>
SUSE Labs, CR

Jan Kara
Dec 18, 2013, 12:30:02 PM
Abusing rq->csd.list for a list of requests to complete is rather ugly.
Especially since using queuelist should be safe and much cleaner.

Signed-off-by: Jan Kara <ja...@suse.cz>
---
block/blk-softirq.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
--
1.8.1.4

Jan Kara
Dec 18, 2013, 12:30:02 PM
Block layer currently abuses rq->csd.list.next for storing fifo_time.
That is a terrible hack and completely unnecessary as well. Union
achieves the same space saving in a cleaner way.

Signed-off-by: Jan Kara <ja...@suse.cz>
---
index 306dd8cd0b6f..0bdfd46f4735 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -202,12 +202,6 @@ enum {
#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)

-/*
- * Hack to reuse the csd.list list_head as the fifo time holder while
- * the request is in the io scheduler. Saves an unsigned long in rq.
- */
-#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
-#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) do { \
list_del_init(&(rq)->queuelist); \

Jan Kara
Dec 18, 2013, 12:30:02 PM
From: Christoph Hellwig <h...@lst.de>

Make smp_call_function_single and friends more efficient by using
a lockless list.

Signed-off-by: Christoph Hellwig <h...@lst.de>
Signed-off-by: Jan Kara <ja...@suse.cz>
---
include/linux/elevator.h | 2 +-
include/linux/smp.h | 3 ++-
kernel/smp.c | 51 ++++++++++--------------------------------------
3 files changed, 13 insertions(+), 43 deletions(-)

diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 0bdfd46f4735..06860e2a25c3 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -205,7 +205,7 @@ enum {
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) do { \
list_del_init(&(rq)->queuelist); \
- INIT_LIST_HEAD(&(rq)->csd.list); \
+ (rq)->csd.llist.next = NULL; \
} while (0)

#else /* CONFIG_BLOCK */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5da22ee42e16..9a1b8ba05924 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,12 +11,13 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
+#include <linux/llist.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
- struct list_head list;
+ struct llist_node llist;
smp_call_func_t func;
void *info;
u16 flags;
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f94028838..47b415e16c24 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -28,12 +28,7 @@ struct call_function_data {

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

-struct call_single_queue {
- struct list_head list;

Jan Kara
Dec 18, 2013, 12:30:03 PM
Hello,

these three patches make smp_call_function_single() use lockless lists and
cleanup abuse of csd.list in the block layer. I have some use for lockless
smp_call_function_single() in printk code and that's why I've picked up
Christoph's patch. Jens, can you have a look at the series please?

Changes since v1:
* Reordered patches to a more sensible order.

Honza

Jan Kara
Dec 18, 2013, 12:30:03 PM
From: Christoph Hellwig <h...@lst.de>

Make smp_call_function_single and friends more efficient by using
a lockless list.

Signed-off-by: Christoph Hellwig <h...@lst.de>
Signed-off-by: Jan Kara <ja...@suse.cz>
---
include/linux/elevator.h | 2 +-
include/linux/smp.h | 7 ++++++-
kernel/smp.c | 51 ++++++++++--------------------------------------
3 files changed, 17 insertions(+), 43 deletions(-)

diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 306dd8cd0b6f..ec0e8f04c03c 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -211,7 +211,7 @@ enum {
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) do { \
list_del_init(&(rq)->queuelist); \
- INIT_LIST_HEAD(&(rq)->csd.list); \
+ (rq)->csd.llist.next = NULL; \
} while (0)

#else /* CONFIG_BLOCK */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5da22ee42e16..c0f38018c195 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,12 +11,17 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
+#include <linux/llist.h>

extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
- struct list_head list;
+ /* We still need list_head because of abuse by a block layer */
+ union {
+ struct list_head list;
+ struct llist_node llist;
+ };

Jan Kara
Dec 18, 2013, 12:30:03 PM
Block layer currently abuses rq->csd.list.next for storing fifo_time.
That is a terrible hack and completely unnecessary as well. Union
achieves the same space saving in a cleaner way.

Signed-off-by: Jan Kara <ja...@suse.cz>
---
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ec0e8f04c03c..06860e2a25c3 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -202,12 +202,6 @@ enum {
#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)

-/*
- * Hack to reuse the csd.list list_head as the fifo time holder while
- * the request is in the io scheduler. Saves an unsigned long in rq.
- */
-#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
-#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) do { \
list_del_init(&(rq)->queuelist); \
--
1.8.1.4

Jan Kara
Dec 18, 2013, 12:30:03 PM
Abusing rq->csd.list for a list of requests to complete is rather ugly.
Especially since using queuelist should be safe and much cleaner.

Signed-off-by: Jan Kara <ja...@suse.cz>
---
block/blk-softirq.c | 12 ++++++------
include/linux/smp.h | 6 +-----
2 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c0f38018c195..9a1b8ba05924 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -17,11 +17,7 @@ extern void cpu_idle(void);

typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
- /* We still need list_head because of abuse by a block layer */
- union {
- struct list_head list;
- struct llist_node llist;
- };
+ struct llist_node llist;
smp_call_func_t func;
void *info;
u16 flags;
--
1.8.1.4

Jan Kara
Dec 18, 2013, 4:00:01 PM
On Wed 18-12-13 18:23:33, Jan Kara wrote:
> Block layer currently abuses rq->csd.list.next for storing fifo_time.
> That is a terrible hack and completely unnecessary as well. Union
> achieves the same space saving in a cleaner way.
This is the correct patch 1/3.

Honza
Jan Kara <ja...@suse.cz>
SUSE Labs, CR

Jan Kara
Dec 18, 2013, 4:00:03 PM
On Wed 18-12-13 18:23:36, Jan Kara wrote:
> Abusing rq->csd.list for a list of requests to complete is rather ugly.
> Especially since using queuelist should be safe and much cleaner.
This is the correct patch 2/3.

Honza
Jan Kara <ja...@suse.cz>
SUSE Labs, CR

Jan Kara
Dec 18, 2013, 4:00:03 PM
On Wed 18-12-13 18:23:38, Jan Kara wrote:
> From: Christoph Hellwig <h...@lst.de>
>
> Make smp_call_function_single and friends more efficient by using
> a lockless list.
This is the correct patch 3/3.

Honza
Jan Kara <ja...@suse.cz>
SUSE Labs, CR

Jan Kara
Dec 18, 2013, 4:00:03 PM
On Wed 18-12-13 18:23:32, Jan Kara wrote:
> Hello,
>
> these three patches make smp_call_function_single() use lockless lists and
> cleanup abuse of csd.list in the block layer. I have some use for lockless
> smp_call_function_single() in printk code and that's why I've picked up
> Christoph's patch. Jens, can you have a look at the series please?
>
> Changes since v1:
> * Reordered patches to a more sensible order.
Bah, I screwed up. I had both the new and the older version of the patches in
the directory for sending. Since it is just three patches, I'll just mark the
correct patches by email, but if that is a problem I can resend as well.

Honza
--
Jan Kara <ja...@suse.cz>
SUSE Labs, CR

Jens Axboe
Dec 19, 2013, 10:40:02 AM
On Wed, Dec 18 2013, Jan Kara wrote:
> Hello,
>
> these three patches make smp_call_function_single() use lockless lists and
> cleanup abuse of csd.list in the block layer. I have some use for lockless
> smp_call_function_single() in printk code and that's why I've picked up
> Christoph's patch. Jens, can you have a look at the series please?
>
> Changes since v1:
> * Reordered patches to a more sensible order.

Thanks Jan, I'll throw this into the testing mix for 3.14. Will be a
little delayed over Christmas, with holidays and family visiting.

--
Jens Axboe

Christoph Hellwig
Jan 7, 2014, 10:50:02 AM
On Thu, Dec 19, 2013 at 08:31:54AM -0700, Jens Axboe wrote:
> On Wed, Dec 18 2013, Jan Kara wrote:
> > Hello,
> >
> > these three patches make smp_call_function_single() use lockless lists and
> > cleanup abuse of csd.list in the block layer. I have some use for lockless
> > smp_call_function_single() in printk code and that's why I've picked up
> > Christoph's patch. Jens, can you have a look at the series please?
> >
> > Changes since v1:
> > * Reordered patches to a more sensible order.
>
> Thanks Jan, I'll throw this into the testing mix for 3.14. Will be a
> little delayed over christmas, with holidays and family visiting.

Did you make any progress? Btw, I'd love it if you could queue up the
patch to use __smp_call_function_single directly in the block layer
for 3.14. Independent of whether an llist-based
__smp_call_function_single is faster than one using the lock, we get
down from a list plus an llist to just one of them, so I can't see how
that patch would cause an issue. With all the prep patches already in,
it can be queued up independently of Jan's series as well.