diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2011-07-12 17:38:34 +0400 |
---|---|---|
committer | Minda Chen <minda.chen@starfivetech.com> | 2023-11-06 14:24:47 +0300 |
commit | 9eb1421db480e3050d9db5b4dd9acb82126c4658 (patch) | |
tree | 610aeaf49ec792ace1712f7ac395dfad933566a9 /include | |
parent | 84da3d03693774548e4d8ade45eca4da6678bc71 (diff) | |
download | linux-9eb1421db480e3050d9db5b4dd9acb82126c4658.tar.xz |
net: Use skbufhead with raw lock
Use the rps lock as rawlock so we can keep irq-off regions. It looks low
latency. However we can't kfree() from this context therefore we defer this
to the softirq and use the tofree_queue list for it (similar to process_queue).
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/skbuff.h | 7 |
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 841e2f0f5240..b5c228b09f52 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -297,6 +297,7 @@ struct sk_buff_head {
 
 	__u32		qlen;
 	spinlock_t	lock;
+	raw_spinlock_t	raw_lock;
 };
 
 struct sk_buff;
@@ -1916,6 +1917,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
 	__skb_queue_head_init(list);
 }
 
+static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
+{
+	raw_spin_lock_init(&list->raw_lock);
+	__skb_queue_head_init(list);
+}
+
 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 		struct lock_class_key *class)
 {