Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_conn.c    181
-rw-r--r--  net/bluetooth/hci_event.c    15
-rw-r--r--  net/bluetooth/hci_sync.c    150
-rw-r--r--  net/bluetooth/iso.c          26
-rw-r--r--  net/bluetooth/l2cap_core.c    3
-rw-r--r--  net/ipv4/tcp_offload.c        2
-rw-r--r--  net/ipv4/udp_offload.c       61
-rw-r--r--  net/ipv6/tcpv6_offload.c      2
-rw-r--r--  net/mac80211/status.c         8
-rw-r--r--  net/sched/sch_drr.c           9
-rw-r--r--  net/sched/sch_ets.c           9
-rw-r--r--  net/sched/sch_hfsc.c          2
-rw-r--r--  net/sched/sch_qfq.c          11
-rw-r--r--  net/xdp/xsk.c                 6
-rw-r--r--  net/xdp/xsk_buff_pool.c       1
15 files changed, 264 insertions, 222 deletions
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7e1b53857648..6533e281ada3 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -2064,95 +2064,6 @@ static int create_big_sync(struct hci_dev *hdev, void *data)
return hci_le_create_big(conn, &conn->iso_qos);
}
-static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
-{
- bt_dev_dbg(hdev, "");
-
- if (err)
- bt_dev_err(hdev, "Unable to create PA: %d", err);
-}
-
-static bool hci_conn_check_create_pa_sync(struct hci_conn *conn)
-{
- if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID)
- return false;
-
- return true;
-}
-
-static int create_pa_sync(struct hci_dev *hdev, void *data)
-{
- struct hci_cp_le_pa_create_sync cp = {0};
- struct hci_conn *conn;
- int err = 0;
-
- hci_dev_lock(hdev);
-
- rcu_read_lock();
-
- /* The spec allows only one pending LE Periodic Advertising Create
- * Sync command at a time. If the command is pending now, don't do
- * anything. We check for pending connections after each PA Sync
- * Established event.
- *
- * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
- * page 2493:
- *
- * If the Host issues this command when another HCI_LE_Periodic_
- * Advertising_Create_Sync command is pending, the Controller shall
- * return the error code Command Disallowed (0x0C).
- */
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags))
- goto unlock;
- }
-
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (hci_conn_check_create_pa_sync(conn)) {
- struct bt_iso_qos *qos = &conn->iso_qos;
-
- cp.options = qos->bcast.options;
- cp.sid = conn->sid;
- cp.addr_type = conn->dst_type;
- bacpy(&cp.addr, &conn->dst);
- cp.skip = cpu_to_le16(qos->bcast.skip);
- cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
- cp.sync_cte_type = qos->bcast.sync_cte_type;
-
- break;
- }
- }
-
-unlock:
- rcu_read_unlock();
-
- hci_dev_unlock(hdev);
-
- if (bacmp(&cp.addr, BDADDR_ANY)) {
- hci_dev_set_flag(hdev, HCI_PA_SYNC);
- set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
-
- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
- sizeof(cp), &cp, HCI_CMD_TIMEOUT);
- if (!err)
- err = hci_update_passive_scan_sync(hdev);
-
- if (err) {
- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
- }
- }
-
- return err;
-}
-
-int hci_pa_create_sync_pending(struct hci_dev *hdev)
-{
- /* Queue start pa_create_sync and scan */
- return hci_cmd_sync_queue(hdev, create_pa_sync,
- NULL, create_pa_complete);
-}
-
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid,
struct bt_iso_qos *qos)
@@ -2167,97 +2078,18 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
conn->dst_type = dst_type;
conn->sid = sid;
conn->state = BT_LISTEN;
+ conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10);
hci_conn_hold(conn);
- hci_pa_create_sync_pending(hdev);
+ hci_connect_pa_sync(hdev, conn);
return conn;
}
-static bool hci_conn_check_create_big_sync(struct hci_conn *conn)
-{
- if (!conn->num_bis)
- return false;
-
- return true;
-}
-
-static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err)
-{
- bt_dev_dbg(hdev, "");
-
- if (err)
- bt_dev_err(hdev, "Unable to create BIG sync: %d", err);
-}
-
-static int big_create_sync(struct hci_dev *hdev, void *data)
-{
- DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11);
- struct hci_conn *conn;
-
- rcu_read_lock();
-
- pdu->num_bis = 0;
-
- /* The spec allows only one pending LE BIG Create Sync command at
- * a time. If the command is pending now, don't do anything. We
- * check for pending connections after each BIG Sync Established
- * event.
- *
- * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
- * page 2586:
- *
- * If the Host sends this command when the Controller is in the
- * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
- * Established event has not been generated, the Controller shall
- * return the error code Command Disallowed (0x0C).
- */
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags))
- goto unlock;
- }
-
- list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- if (hci_conn_check_create_big_sync(conn)) {
- struct bt_iso_qos *qos = &conn->iso_qos;
-
- set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
-
- pdu->handle = qos->bcast.big;
- pdu->sync_handle = cpu_to_le16(conn->sync_handle);
- pdu->encryption = qos->bcast.encryption;
- memcpy(pdu->bcode, qos->bcast.bcode,
- sizeof(pdu->bcode));
- pdu->mse = qos->bcast.mse;
- pdu->timeout = cpu_to_le16(qos->bcast.timeout);
- pdu->num_bis = conn->num_bis;
- memcpy(pdu->bis, conn->bis, conn->num_bis);
-
- break;
- }
- }
-
-unlock:
- rcu_read_unlock();
-
- if (!pdu->num_bis)
- return 0;
-
- return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
- struct_size(pdu, bis, pdu->num_bis), pdu);
-}
-
-int hci_le_big_create_sync_pending(struct hci_dev *hdev)
-{
- /* Queue big_create_sync */
- return hci_cmd_sync_queue_once(hdev, big_create_sync,
- NULL, big_create_sync_complete);
-}
-
-int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
- struct bt_iso_qos *qos,
- __u16 sync_handle, __u8 num_bis, __u8 bis[])
+int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ struct bt_iso_qos *qos, __u16 sync_handle,
+ __u8 num_bis, __u8 bis[])
{
int err;
@@ -2274,9 +2106,10 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
hcon->num_bis = num_bis;
memcpy(hcon->bis, bis, num_bis);
+ hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10);
}
- return hci_le_big_create_sync_pending(hdev);
+ return hci_connect_big_sync(hdev, hcon);
}
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
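The two conn_timeout assignments added above convert the HCI timeout parameters, which the Core spec expresses in units of 10 ms, into jiffies before the connection is handed to the sync machinery. A minimal sketch of that conversion for reference (the helper name below is hypothetical, not part of the patch):

	/* Hypothetical helper, sketch only: PA/BIG sync timeouts are N * 10 ms. */
	static inline unsigned long hci_sync_timeout_to_jiffies(__u16 timeout)
	{
		return msecs_to_jiffies(timeout * 10);
	}
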
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 5f808f0b0e9a..6d6061111ac5 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -6378,8 +6378,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_PA_SYNC);
- conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr,
- ev->bdaddr_type);
+ conn = hci_conn_hash_lookup_create_pa_sync(hdev);
if (!conn) {
bt_dev_err(hdev,
"Unable to find connection for dst %pMR sid 0x%2.2x",
@@ -6418,9 +6417,6 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
}
unlock:
- /* Handle any other pending PA sync command */
- hci_pa_create_sync_pending(hdev);
-
hci_dev_unlock(hdev);
}
@@ -6932,7 +6928,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
+ if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
flex_array_size(ev, bis, ev->num_bis)))
return;
@@ -7003,9 +6999,6 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
}
unlock:
- /* Handle any other pending BIG sync command */
- hci_le_big_create_sync_pending(hdev);
-
hci_dev_unlock(hdev);
}
@@ -7127,8 +7120,8 @@ static const struct hci_le_ev {
hci_le_create_big_complete_evt,
sizeof(struct hci_evt_le_create_big_complete),
HCI_MAX_EVENT_SIZE),
- /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
- HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
+ /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */
+ HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
hci_le_big_sync_established_evt,
sizeof(struct hci_evt_le_big_sync_estabilished),
HCI_MAX_EVENT_SIZE),
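hci_conn_hash_lookup_create_pa_sync() is declared in hci_core.h and is not shown in this diff; as a hedged sketch of its presumed shape (the function name below is hypothetical), it walks the connection hash under RCU looking for the ISO link flagged as awaiting PA Sync Established:

	static struct hci_conn *lookup_pending_pa_sync(struct hci_dev *hdev)
	{
		struct hci_conn *c, *found = NULL;

		rcu_read_lock();
		list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
			if (c->type == ISO_LINK &&
			    test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags)) {
				found = c;
				break;
			}
		}
		rcu_read_unlock();

		return found;
	}
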
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 609b035e5c90..e56b1cbedab9 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -2693,16 +2693,16 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
/* Force address filtering if PA Sync is in progress */
if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
- struct hci_cp_le_pa_create_sync *sent;
+ struct hci_conn *conn;
- sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
- if (sent) {
+ conn = hci_conn_hash_lookup_create_pa_sync(hdev);
+ if (conn) {
struct conn_params pa;
memset(&pa, 0, sizeof(pa));
- bacpy(&pa.addr, &sent->addr);
- pa.addr_type = sent->addr_type;
+ bacpy(&pa.addr, &conn->dst);
+ pa.addr_type = conn->dst_type;
/* Clear first since there could be addresses left
* behind.
@@ -6895,3 +6895,143 @@ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
+
+static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
+{
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (!err)
+ return;
+
+ hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+
+ if (err == -ECANCELED)
+ return;
+
+ hci_dev_lock(hdev);
+
+ hci_update_passive_scan_sync(hdev);
+
+ hci_dev_unlock(hdev);
+}
+
+static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+{
+ struct hci_cp_le_pa_create_sync cp;
+ struct hci_conn *conn = data;
+ struct bt_iso_qos *qos = &conn->iso_qos;
+ int err;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
+ return -EBUSY;
+
+ /* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can
+ * program the address in the allow list so PA advertisements can be
+ * received.
+ */
+ set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+
+ hci_update_passive_scan_sync(hdev);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.options = qos->bcast.options;
+ cp.sid = conn->sid;
+ cp.addr_type = conn->dst_type;
+ bacpy(&cp.addr, &conn->dst);
+ cp.skip = cpu_to_le16(qos->bcast.skip);
+ cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
+ cp.sync_cte_type = qos->bcast.sync_cte_type;
+
+ /* The spec allows only one pending LE Periodic Advertising Create
+ * Sync command at a time so we forcefully wait for PA Sync Established
+ * event since cmd_work can only schedule one command at a time.
+ *
+ * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+ * page 2493:
+ *
+ * If the Host issues this command when another HCI_LE_Periodic_
+ * Advertising_Create_Sync command is pending, the Controller shall
+ * return the error code Command Disallowed (0x0C).
+ */
+ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC,
+ sizeof(cp), &cp,
+ HCI_EV_LE_PA_SYNC_ESTABLISHED,
+ conn->conn_timeout, NULL);
+ if (err == -ETIMEDOUT)
+ __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
+ return err;
+}
+
+int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn,
+ create_pa_complete);
+}
+
+static void create_big_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct hci_conn *conn = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (err == -ECANCELED)
+ return;
+
+ if (hci_conn_valid(hdev, conn))
+ clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
+}
+
+static int hci_le_big_create_sync(struct hci_dev *hdev, void *data)
+{
+ DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11);
+ struct hci_conn *conn = data;
+ struct bt_iso_qos *qos = &conn->iso_qos;
+ int err;
+
+ if (!hci_conn_valid(hdev, conn))
+ return -ECANCELED;
+
+ set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
+
+ memset(cp, 0, sizeof(*cp));
+ cp->handle = qos->bcast.big;
+ cp->sync_handle = cpu_to_le16(conn->sync_handle);
+ cp->encryption = qos->bcast.encryption;
+ memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode));
+ cp->mse = qos->bcast.mse;
+ cp->timeout = cpu_to_le16(qos->bcast.timeout);
+ cp->num_bis = conn->num_bis;
+ memcpy(cp->bis, conn->bis, conn->num_bis);
+
+ /* The spec allows only one pending LE BIG Create Sync command at
+ * a time, so we forcefully wait for BIG Sync Established event since
+ * cmd_work can only schedule one command at a time.
+ *
+ * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+ * page 2586:
+ *
+ * If the Host sends this command when the Controller is in the
+ * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
+ * Established event has not been generated, the Controller shall
+ * return the error code Command Disallowed (0x0C).
+ */
+ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
+ struct_size(cp, bis, cp->num_bis), cp,
+ HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
+ conn->conn_timeout, NULL);
+ if (err == -ETIMEDOUT)
+ hci_le_big_terminate_sync(hdev, cp->handle);
+
+ return err;
+}
+
+int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn,
+ create_big_complete);
+}
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 3501a991f1c6..2819cda616bc 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -1462,14 +1462,13 @@ static void iso_conn_big_sync(struct sock *sk)
lock_sock(sk);
if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
- err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
- &iso_pi(sk)->qos,
- iso_pi(sk)->sync_handle,
- iso_pi(sk)->bc_num_bis,
- iso_pi(sk)->bc_bis);
+ err = hci_conn_big_create_sync(hdev, iso_pi(sk)->conn->hcon,
+ &iso_pi(sk)->qos,
+ iso_pi(sk)->sync_handle,
+ iso_pi(sk)->bc_num_bis,
+ iso_pi(sk)->bc_bis);
if (err)
- bt_dev_err(hdev, "hci_le_big_create_sync: %d",
- err);
+ bt_dev_err(hdev, "hci_big_create_sync: %d", err);
}
release_sock(sk);
@@ -1922,7 +1921,7 @@ static void iso_conn_ready(struct iso_conn *conn)
hcon);
} else if (test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
ev = hci_recv_event_data(hcon->hdev,
- HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
+ HCI_EVT_LE_BIG_SYNC_ESTABLISHED);
/* Get reference to PA sync parent socket, if it exists */
parent = iso_get_sock(&hcon->src, &hcon->dst,
@@ -2113,12 +2112,11 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) &&
!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
- err = hci_le_big_create_sync(hdev,
- hcon,
- &iso_pi(sk)->qos,
- iso_pi(sk)->sync_handle,
- iso_pi(sk)->bc_num_bis,
- iso_pi(sk)->bc_bis);
+ err = hci_conn_big_create_sync(hdev, hcon,
+ &iso_pi(sk)->qos,
+ iso_pi(sk)->sync_handle,
+ iso_pi(sk)->bc_num_bis,
+ iso_pi(sk)->bc_bis);
if (err) {
bt_dev_err(hdev, "hci_le_big_create_sync: %d",
err);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5ca7ac43c58d..73472756618a 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7415,6 +7415,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
return -ENOMEM;
/* Init rx_len */
conn->rx_len = len;
+
+ skb_set_delivery_time(conn->rx_skb, skb->tstamp,
+ skb->tstamp_type);
}
/* Copy as much as the rx_skb can hold */
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 934f777f29d3..d293087b426d 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -439,7 +439,7 @@ static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
iif, sdif);
NAPI_GRO_CB(skb)->is_flist = !sk;
if (sk)
- sock_put(sk);
+ sock_gen_put(sk);
}
INDIRECT_CALLABLE_SCOPE
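The lookup feeding tcp4_check_fraglist_gro() can return timewait or request sockets, which must not be released with sock_put(); sock_gen_put() dispatches on the socket state instead. A hedged sketch of the idea, not the kernel's actual implementation:

	/* Sketch only: drop a reference no matter which socket flavour it is. */
	static void put_any_sock(struct sock *sk)
	{
		if (sk->sk_state == TCP_TIME_WAIT)
			inet_twsk_put(inet_twsk(sk));
		else if (sk->sk_state == TCP_NEW_SYN_RECV)
			reqsk_put(inet_reqsk(sk));
		else
			sock_put(sk);
	}
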
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 2c0725583be3..9a8142ccbabe 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -247,6 +247,62 @@ static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
return segs;
}
+static void __udpv6_gso_segment_csum(struct sk_buff *seg,
+ struct in6_addr *oldip,
+ const struct in6_addr *newip,
+ __be16 *oldport, __be16 newport)
+{
+ struct udphdr *uh = udp_hdr(seg);
+
+ if (ipv6_addr_equal(oldip, newip) && *oldport == newport)
+ return;
+
+ if (uh->check) {
+ inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32,
+ newip->s6_addr32, true);
+
+ inet_proto_csum_replace2(&uh->check, seg, *oldport, newport,
+ false);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+
+ *oldip = *newip;
+ *oldport = newport;
+}
+
+static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs)
+{
+ const struct ipv6hdr *iph;
+ const struct udphdr *uh;
+ struct ipv6hdr *iph2;
+ struct sk_buff *seg;
+ struct udphdr *uh2;
+
+ seg = segs;
+ uh = udp_hdr(seg);
+ iph = ipv6_hdr(seg);
+ uh2 = udp_hdr(seg->next);
+ iph2 = ipv6_hdr(seg->next);
+
+ if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) &&
+ ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
+ ipv6_addr_equal(&iph->daddr, &iph2->daddr))
+ return segs;
+
+ while ((seg = seg->next)) {
+ uh2 = udp_hdr(seg);
+ iph2 = ipv6_hdr(seg);
+
+ __udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
+ &uh2->source, uh->source);
+ __udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
+ &uh2->dest, uh->dest);
+ }
+
+ return segs;
+}
+
static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6)
@@ -259,7 +315,10 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
- return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
+ if (is_ipv6)
+ return __udpv6_gso_segment_list_csum(skb);
+ else
+ return __udpv4_gso_segment_list_csum(skb);
}
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
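The single 32-bit compare in __udpv6_gso_segment_list_csum() covers both ports at once because source and dest are adjacent 16-bit fields in struct udphdr; the compile-time check below only illustrates that layout assumption and is not part of the patch:

	static_assert(offsetof(struct udphdr, dest) ==
		      offsetof(struct udphdr, source) + sizeof(__be16));
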
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index d9b11fe41bf0..a8a04f441e78 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -42,7 +42,7 @@ static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
iif, sdif);
NAPI_GRO_CB(skb)->is_flist = !sk;
if (sk)
- sock_put(sk);
+ sock_gen_put(sk);
#endif /* IS_ENABLED(CONFIG_IPV6) */
}
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index b17b3cc7fb90..a362254b310c 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -1085,7 +1085,13 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
ieee80211_report_used_skb(local, skb, false, status->ack_hwtstamp);
- if (status->free_list)
+ /*
+ * This is a bit racy but we can avoid a lot of work
+ * with this test...
+ */
+ if (local->tx_mntrs)
+ ieee80211_tx_monitor(local, skb, retry_count, status);
+ else if (status->free_list)
list_add_tail(&skb->list, status->free_list);
else
dev_kfree_skb(skb);
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index e0a81d313aa7..9b6d79bd8737 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -35,6 +35,11 @@ struct drr_sched {
struct Qdisc_class_hash clhash;
};
+static bool cl_is_active(struct drr_class *cl)
+{
+ return !list_empty(&cl->alist);
+}
+
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
struct drr_sched *q = qdisc_priv(sch);
@@ -337,7 +342,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
int err = 0;
- bool first;
cl = drr_classify(skb, sch, &err);
if (cl == NULL) {
@@ -347,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -357,7 +360,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- if (first) {
+ if (!cl_is_active(cl)) {
list_add_tail(&cl->alist, &q->active);
cl->deficit = cl->quantum;
}
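The sch_ets and sch_qfq hunks below apply the same list_empty()-based check, and the sch_hfsc hunk guards with cl_nactive to the same end: whether a class needs to be (re)linked is decided by its own activation state rather than by the child qdisc's queue length, which a child such as netem can change (for example by duplicating packets) in ways that made the old first/qlen heuristic unreliable and could double-add the class to the active list. A generic sketch of the pattern, reusing the names from the hunk above:

	/* Sketch: link the class only when it is not already active; this
	 * assumes alist is set up with INIT_LIST_HEAD() and taken off the
	 * active list with list_del_init() (or equivalent) on deactivation.
	 */
	if (list_empty(&cl->alist)) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}
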
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index c3bdeb14185b..2c069f0181c6 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};
+static bool cl_is_active(struct ets_class *cl)
+{
+ return !list_empty(&cl->alist);
+}
+
static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
unsigned int *quantum,
struct netlink_ext_ack *extack)
@@ -416,7 +421,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct ets_sched *q = qdisc_priv(sch);
struct ets_class *cl;
int err = 0;
- bool first;
cl = ets_classify(skb, sch, &err);
if (!cl) {
@@ -426,7 +430,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -436,7 +439,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- if (first && !ets_class_is_strict(q, cl)) {
+ if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) {
list_add_tail(&cl->alist, &q->active);
cl->deficit = cl->quantum;
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6c8ef826cec0..cb8c525ea20e 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1569,7 +1569,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
return err;
}
- if (first) {
+ if (first && !cl->cl_nactive) {
if (cl->cl_flags & HFSC_RSC)
init_ed(cl, len);
if (cl->cl_flags & HFSC_FSC)
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 687a932eb9b2..bf1282cb22eb 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -202,6 +202,11 @@ struct qfq_sched {
*/
enum update_reason {enqueue, requeue};
+static bool cl_is_active(struct qfq_class *cl)
+{
+ return !list_empty(&cl->alist);
+}
+
static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
struct qfq_sched *q = qdisc_priv(sch);
@@ -1215,7 +1220,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct qfq_class *cl;
struct qfq_aggregate *agg;
int err = 0;
- bool first;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
@@ -1237,7 +1241,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
- first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1253,8 +1256,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
++sch->q.qlen;
agg = cl->agg;
- /* if the queue was not empty, then done here */
- if (!first) {
+ /* if the class is active, then done here */
+ if (cl_is_active(cl)) {
if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
list_first_entry(&agg->active, struct qfq_class, alist)
== cl && cl->deficit < len)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 5696af45bcf7..4abc81f33d3e 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -338,13 +338,14 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
u32 len = xdp_get_buff_len(xdp);
int err;
- spin_lock_bh(&xs->rx_lock);
err = xsk_rcv_check(xs, xdp, len);
if (!err) {
+ spin_lock_bh(&xs->pool->rx_lock);
err = __xsk_rcv(xs, xdp, len);
xsk_flush(xs);
+ spin_unlock_bh(&xs->pool->rx_lock);
}
- spin_unlock_bh(&xs->rx_lock);
+
return err;
}
@@ -1734,7 +1735,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
xs = xdp_sk(sk);
xs->state = XSK_READY;
mutex_init(&xs->mutex);
- spin_lock_init(&xs->rx_lock);
INIT_LIST_HEAD(&xs->map_list);
spin_lock_init(&xs->map_list_lock);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 25a76c5ce0f1..c5181a9044ad 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -89,6 +89,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
pool->addrs = umem->addrs;
pool->tx_metadata_len = umem->tx_metadata_len;
pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM;
+ spin_lock_init(&pool->rx_lock);
INIT_LIST_HEAD(&pool->free_list);
INIT_LIST_HEAD(&pool->xskb_list);
INIT_LIST_HEAD(&pool->xsk_tx_list);
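For context on the xsk hunks above: the generic-RX lock now lives in the buffer pool, and a pool is shared when several AF_XDP sockets are bound to the same UMEM and queue, so their generic receive paths serialize on a single lock. A hedged userspace sketch of that sharing (ifindex, queue_id and the two socket fds are placeholders; ring and UMEM setup is omitted):

	#include <sys/socket.h>
	#include <linux/if_xdp.h>
	#include <stdio.h>

	/* Sketch: bind a second AF_XDP socket to the same ifindex/queue via
	 * XDP_SHARED_UMEM so both sockets end up on one xsk_buff_pool.
	 */
	static int bind_shared(int second_fd, int first_fd,
			       unsigned int ifindex, unsigned int queue_id)
	{
		struct sockaddr_xdp sxdp = {
			.sxdp_family = AF_XDP,
			.sxdp_flags = XDP_SHARED_UMEM,
			.sxdp_ifindex = ifindex,
			.sxdp_queue_id = queue_id,
			.sxdp_shared_umem_fd = first_fd,
		};

		if (bind(second_fd, (struct sockaddr *)&sxdp, sizeof(sxdp))) {
			perror("bind(XDP_SHARED_UMEM)");
			return -1;
		}

		return 0;
	}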