-
Notifications
You must be signed in to change notification settings - Fork 103
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Fix #806: Reduce count of IPI during work_queue processing. #1072
Changes from all commits
5577d2a
c39e8d7
a82e10e
aac45fc
914bcca
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -38,13 +38,37 @@ typedef struct { | |
long last_head; | ||
atomic64_t head ____cacheline_aligned; | ||
atomic64_t tail ____cacheline_aligned; | ||
unsigned long flags; | ||
} TfwRBQueue; | ||
|
||
/*
 * Flag bits kept in TfwRBQueue->flags.
 */
enum {
	/*
	 * IPI generation is enabled: a producer that observes this bit
	 * after a push raises a softirq IPI to the consumer CPU.
	 */
	TFW_QUEUE_IPI = 0
};
|
||
/*
 * Re-arm IPI generation before a work-queue consumer goes idle.
 *
 * Sets TFW_QUEUE_IPI so that the next producer raises an IPI, then
 * re-checks the queue size through @size_cb:
 *  - if the queue is empty, RETURNS FROM THE ENCLOSING FUNCTION with the
 *    bit still set, so a later push will wake this CPU via IPI;
 *  - if items slipped in between the bit set and the size check, clears
 *    the bit again and falls through so the caller keeps draining the
 *    queue without IPIs.
 *
 * The barrier orders the set_bit() against the subsequent size read so a
 * concurrent producer either sees the bit or its push is seen here.
 *
 * NOTE: this macro contains a hidden `return;` — use it only in functions
 * returning void, at a point where returning means "go idle".
 */
#define TFW_WQ_IPI_SYNC(size_cb, wq)			\
do {							\
	set_bit(TFW_QUEUE_IPI, &(wq)->flags);		\
	smp_mb__after_atomic();				\
	if (!size_cb(wq))				\
		return;					\
	clear_bit(TFW_QUEUE_IPI, &(wq)->flags);		\
} while (0)
|
||
/*
 * Work-queue API (implemented in the corresponding .c file).
 * NOTE(review): return-value conventions are not visible in this header —
 * presumably 0/negative-error for init and a non-zero ticket on a failed
 * push; confirm against the implementation.
 */
int tfw_wq_init(TfwRBQueue *wq, int node);
void tfw_wq_destroy(TfwRBQueue *wq);
long __tfw_wq_push(TfwRBQueue *wq, void *ptr);
int tfw_wq_pop_ticket(TfwRBQueue *wq, void *buf, long *ticket);
|
||
static inline int | ||
tfw_wq_size(TfwRBQueue *q) | ||
{ | ||
long t = atomic64_read(&q->tail); | ||
long h = atomic64_read(&q->head); | ||
|
||
return t > h ? 0 : h - t; | ||
} | ||
|
||
static inline void | ||
tfw_raise_softirq(int cpu, struct irq_work *work, | ||
void (*local_cpu_cb)(struct irq_work *)) | ||
|
@@ -62,8 +86,14 @@ tfw_wq_push(TfwRBQueue *q, void *ptr, int cpu, struct irq_work *work, | |
long ticket = __tfw_wq_push(q, ptr); | ||
if (unlikely(ticket)) | ||
return ticket; | ||
/* | ||
* The atomic operation is 'atomic64_cmpxchg()' in | ||
* '__tfw_wq_push()' above. | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The fast path takes place only when the queue is full and |
||
*/ | ||
smp_mb__after_atomic(); | ||
|
||
tfw_raise_softirq(cpu, work, local_cpu_cb); | ||
if (test_bit(TFW_QUEUE_IPI, &q->flags)) | ||
tfw_raise_softirq(cpu, work, local_cpu_cb); | ||
|
||
return 0; | ||
} | ||
|
@@ -74,13 +104,4 @@ tfw_wq_pop(TfwRBQueue *wq, void *buf) | |
return tfw_wq_pop_ticket(wq, buf, NULL); | ||
} | ||
|
||
/*
 * NOTE(review): this is the pre-move copy of tfw_wq_size() removed by diff
 * hunk @@ -74,13 +104,4 @@ — the PR relocates the definition earlier in
 * the header (next to the other static inline helpers). Shown here only as
 * the deletion side of the diff.
 */
static inline int
tfw_wq_size(TfwRBQueue *q)
{
	long t = atomic64_read(&q->tail);
	long h = atomic64_read(&q->head);

	return t > h ? 0 : h - t;
}
|
||
#endif /* __TFW_WORK_QUEUE_H__ */ |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
OK, we don't need a barrier after a spinlock operation, but having a comment here would be good in case we remove the spinlock.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Done.