Diffstat (limited to 'net')
314 files changed, 4909 insertions, 14033 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 5aa8144101dc..0beb44f2fe1f 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -467,12 +467,9 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head, off_vlan = skb_gro_offset(skb); hlen = off_vlan + sizeof(*vhdr); - vhdr = skb_gro_header_fast(skb, off_vlan); - if (skb_gro_header_hard(skb, hlen)) { - vhdr = skb_gro_header_slow(skb, hlen, off_vlan); - if (unlikely(!vhdr)) - goto out; - } + vhdr = skb_gro_header(skb, hlen, off_vlan); + if (unlikely(!vhdr)) + goto out; type = vhdr->h_vlan_encapsulated_proto; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 035812b0461c..e1bb41a443c4 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -674,9 +674,9 @@ static int vlan_ethtool_get_link_ksettings(struct net_device *dev, static void vlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, vlan_fullname, sizeof(info->driver)); - strlcpy(info->version, vlan_version, sizeof(info->version)); - strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strscpy(info->driver, vlan_fullname, sizeof(info->driver)); + strscpy(info->version, vlan_version, sizeof(info->version)); + strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); } static int vlan_ethtool_get_ts_info(struct net_device *dev, diff --git a/net/Kconfig b/net/Kconfig index 6b78f695caa6..48c33c222199 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -204,7 +204,6 @@ config BRIDGE_NETFILTER source "net/netfilter/Kconfig" source "net/ipv4/netfilter/Kconfig" source "net/ipv6/netfilter/Kconfig" -source "net/decnet/netfilter/Kconfig" source "net/bridge/netfilter/Kconfig" endif @@ -221,7 +220,6 @@ source "net/802/Kconfig" source "net/bridge/Kconfig" source "net/dsa/Kconfig" source "net/8021q/Kconfig" -source "net/decnet/Kconfig" source "net/llc/Kconfig" source "drivers/net/appletalk/Kconfig" source "net/x25/Kconfig" diff --git a/net/Makefile b/net/Makefile index fbfeb8a0bb37..6a62e5b27378 100644 --- a/net/Makefile +++ b/net/Makefile @@ -38,7 +38,6 @@ obj-$(CONFIG_AF_KCM) += kcm/ obj-$(CONFIG_STREAM_PARSER) += strparser/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_L2TP) += l2tp/ -obj-$(CONFIG_DECNET) += decnet/ obj-$(CONFIG_PHONET) += phonet/ ifneq ($(CONFIG_VLAN_8021Q),) obj-y += 8021q/ diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index d82a51e69386..6b4c25a92377 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -778,7 +778,7 @@ static int ax25_getsockopt(struct socket *sock, int level, int optname, ax25_dev = ax25->ax25_dev; if (ax25_dev != NULL && ax25_dev->dev != NULL) { - strlcpy(devname, ax25_dev->dev->name, sizeof(devname)); + strscpy(devname, ax25_dev->dev->name, sizeof(devname)); length = strlen(devname) + 1; } else { *devname = '\0'; diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 00875e1d8c44..a5e4a4e976cf 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -1493,6 +1493,7 @@ struct genl_family batadv_netlink_family __ro_after_init = { .module = THIS_MODULE, .small_ops = batadv_netlink_ops, .n_small_ops = ARRAY_SIZE(batadv_netlink_ops), + .resv_start_op = BATADV_CMD_SET_VLAN + 1, .mcgrps = batadv_netlink_mcgrps, .n_mcgrps = ARRAY_SIZE(batadv_netlink_mcgrps), }; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 485c814cf44a..6643c9c20fa4 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4179,6 +4179,17 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, } 
} + if (i == ARRAY_SIZE(hci_cc_table)) { + /* Unknown opcode, assume byte 0 contains the status, so + * that e.g. __hci_cmd_sync() properly returns errors + * for vendor specific commands send by HCI drivers. + * If a vendor doesn't actually follow this convention we may + * need to introduce a vendor CC table in order to properly set + * the status. + */ + *status = skb->data[0]; + } + handle_cmd_cnt_and_timer(hdev, ev->ncmd); hci_req_cmd_complete(hdev, *opcode, *status, req_complete, @@ -5790,7 +5801,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, */ hci_dev_clear_flag(hdev, HCI_LE_ADV); - conn = hci_lookup_le_connect(hdev); + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); if (!conn) { /* In case of error status and there is no connection pending * just unlock as there is nothing to cleanup. diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index e6d804b82b67..fbd5613eebfc 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -3018,12 +3018,6 @@ static const struct hci_init_stage amp_init2[] = { /* Read Buffer Size (ACL mtu, max pkt, etc.) */ static int hci_read_buffer_size_sync(struct hci_dev *hdev) { - /* Use Read LE Buffer Size V2 if supported */ - if (hdev->commands[41] & 0x20) - return __hci_cmd_sync_status(hdev, - HCI_OP_LE_READ_BUFFER_SIZE_V2, - 0, NULL, HCI_CMD_TIMEOUT); - return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } @@ -3237,6 +3231,12 @@ static const struct hci_init_stage hci_init2[] = { /* Read LE Buffer Size */ static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) { + /* Use Read LE Buffer Size V2 if supported */ + if (hdev->commands[41] & 0x20) + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_BUFFER_SIZE_V2, + 0, NULL, HCI_CMD_TIMEOUT); + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } @@ -4773,9 +4773,11 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) /* Cleanup hci_conn object if it cannot be cancelled as it * likelly means the controller and host stack are out of sync. */ - if (err) + if (err) { + hci_dev_lock(hdev); hci_conn_failed(conn, err); - + hci_dev_unlock(hdev); + } return err; case BT_CONNECT2: return hci_reject_conn_sync(hdev, conn, reason); @@ -5288,17 +5290,21 @@ int hci_suspend_sync(struct hci_dev *hdev) /* Prevent disconnects from causing scanning to be re-enabled */ hci_pause_scan_sync(hdev); - /* Soft disconnect everything (power off) */ - err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); - if (err) { - /* Set state to BT_RUNNING so resume doesn't notify */ - hdev->suspend_state = BT_RUNNING; - hci_resume_sync(hdev); - return err; - } + if (hci_conn_count(hdev)) { + /* Soft disconnect everything (power off) */ + err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); + if (err) { + /* Set state to BT_RUNNING so resume doesn't notify */ + hdev->suspend_state = BT_RUNNING; + hci_resume_sync(hdev); + return err; + } - /* Update event mask so only the allowed event can wakeup the host */ - hci_set_event_mask_sync(hdev); + /* Update event mask so only the allowed event can wakeup the + * host. + */ + hci_set_event_mask_sync(hdev); + } /* Only configure accept list if disconnect succeeded and wake * isn't being prevented. 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 5940744a8cd8..cc20e706c639 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -83,14 +83,14 @@ static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo ci->product = session->input->id.product; ci->version = session->input->id.version; if (session->input->name) - strlcpy(ci->name, session->input->name, 128); + strscpy(ci->name, session->input->name, 128); else - strlcpy(ci->name, "HID Boot Device", 128); + strscpy(ci->name, "HID Boot Device", 128); } else if (session->hid) { ci->vendor = session->hid->vendor; ci->product = session->hid->product; ci->version = session->hid->version; - strlcpy(ci->name, session->hid->name, 128); + strscpy(ci->name, session->hid->name, 128); } } diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index ced8ad4fed4f..613039ba5dbf 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -1309,7 +1309,7 @@ static int iso_sock_shutdown(struct socket *sock, int how) struct sock *sk = sock->sk; int err = 0; - BT_DBG("sock %p, sk %p", sock, sk); + BT_DBG("sock %p, sk %p, how %d", sock, sk, how); if (!sk) return 0; @@ -1317,17 +1317,32 @@ static int iso_sock_shutdown(struct socket *sock, int how) sock_hold(sk); lock_sock(sk); - if (!sk->sk_shutdown) { - sk->sk_shutdown = SHUTDOWN_MASK; - iso_sock_clear_timer(sk); - __iso_sock_close(sk); - - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && - !(current->flags & PF_EXITING)) - err = bt_sock_wait_state(sk, BT_CLOSED, - sk->sk_lingertime); + switch (how) { + case SHUT_RD: + if (sk->sk_shutdown & RCV_SHUTDOWN) + goto unlock; + sk->sk_shutdown |= RCV_SHUTDOWN; + break; + case SHUT_WR: + if (sk->sk_shutdown & SEND_SHUTDOWN) + goto unlock; + sk->sk_shutdown |= SEND_SHUTDOWN; + break; + case SHUT_RDWR: + if (sk->sk_shutdown & SHUTDOWN_MASK) + goto unlock; + sk->sk_shutdown |= SHUTDOWN_MASK; + break; } + iso_sock_clear_timer(sk); + __iso_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + +unlock: release_sock(sk); sock_put(sk); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index cbe0cae73434..2c9de67daadc 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1992,11 +1992,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, src_match = !bacmp(&c->src, src); dst_match = !bacmp(&c->dst, dst); if (src_match && dst_match) { - c = l2cap_chan_hold_unless_zero(c); - if (c) { - read_unlock(&chan_list_lock); - return c; - } + if (!l2cap_chan_hold_unless_zero(c)) + continue; + + read_unlock(&chan_list_lock); + return c; } /* Closest match */ diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 6e31023b84f5..72e6595a71cc 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -4547,6 +4547,22 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_NOT_SUPPORTED); } +static u32 get_params_flags(struct hci_dev *hdev, + struct hci_conn_params *params) +{ + u32 flags = hdev->conn_flags; + + /* Devices using RPAs can only be programmed in the acceptlist if + * LL Privacy has been enable otherwise they cannot mark + * HCI_CONN_FLAG_REMOTE_WAKEUP. 
+ */ + if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) && + hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) + flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP; + + return flags; +} + static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { @@ -4578,10 +4594,10 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, } else { params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, le_addr_type(cp->addr.type)); - if (!params) goto done; + supported_flags = get_params_flags(hdev, params); current_flags = params->flags; } @@ -4649,38 +4665,35 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", &cp->addr.bdaddr, cp->addr.type); } - } else { - params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, - le_addr_type(cp->addr.type)); - if (params) { - /* Devices using RPAs can only be programmed in the - * acceptlist LL Privacy has been enable otherwise they - * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP. - */ - if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && - !use_ll_privacy(hdev) && - hci_find_irk_by_addr(hdev, ¶ms->addr, - params->addr_type)) { - bt_dev_warn(hdev, - "Cannot set wakeable for RPA"); - goto unlock; - } - params->flags = current_flags; - status = MGMT_STATUS_SUCCESS; + goto unlock; + } - /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY - * has been set. - */ - if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY) - hci_update_passive_scan(hdev); - } else { - bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", - &cp->addr.bdaddr, - le_addr_type(cp->addr.type)); - } + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + if (!params) { + bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", + &cp->addr.bdaddr, le_addr_type(cp->addr.type)); + goto unlock; } + supported_flags = get_params_flags(hdev, params); + + if ((supported_flags | current_flags) != supported_flags) { + bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", + current_flags, supported_flags); + goto unlock; + } + + params->flags = current_flags; + status = MGMT_STATUS_SUCCESS; + + /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY + * has been set. 
+ */ + if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY) + hci_update_passive_scan(hdev); + unlock: hci_dev_unlock(hdev); @@ -5054,7 +5067,6 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, else status = MGMT_STATUS_FAILED; - mgmt_pending_remove(cmd); goto unlock; } diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index d11209367dd0..25d8ecf105aa 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -695,6 +695,10 @@ noinline void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) { } +noinline void bpf_kfunc_call_test_destructive(void) +{ +} + __diag_pop(); ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO); @@ -719,6 +723,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1) BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2) BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) BTF_SET8_END(test_sk_check_kfunc_ids) static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 58a4f70e01e3..b82906fc999a 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -251,10 +251,10 @@ static int br_set_mac_address(struct net_device *dev, void *p) static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, "bridge", sizeof(info->driver)); - strlcpy(info->version, BR_VERSION, sizeof(info->version)); - strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); - strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); + strscpy(info->driver, "bridge", sizeof(info->driver)); + strscpy(info->version, BR_VERSION, sizeof(info->version)); + strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strscpy(info->bus_info, "N/A", sizeof(info->bus_info)); } static int br_get_link_ksettings(struct net_device *dev, diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index a84a7cfb9d6d..efbd93e92ce2 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -568,26 +568,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, !is_valid_ether_addr(dev->dev_addr)) return -EINVAL; - /* Also don't allow bridging of net devices that are DSA masters, since - * the bridge layer rx_handler prevents the DSA fake ethertype handler - * to be invoked, so we don't get the chance to strip off and parse the - * DSA switch tag protocol header (the bridge layer just returns - * RX_HANDLER_CONSUMED, stopping RX processing for these frames). - * The only case where that would not be an issue is when bridging can - * already be offloaded, such as when the DSA master is itself a DSA - * or plain switchdev port, and is bridged only with other ports from - * the same hardware device. - */ - if (netdev_uses_dsa(dev)) { - list_for_each_entry(p, &br->port_list, list) { - if (!netdev_port_same_parent_id(dev, p->dev)) { - NL_SET_ERR_MSG(extack, - "Cannot do software bridging with a DSA master"); - return -EINVAL; - } - } - } - /* No bridging of bridges */ if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) { NL_SET_ERR_MSG(extack, diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index ff4779036649..f20f4373ff40 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ /* - Bridged-and-DNAT'ed traffic doesn't * require ip_forwarding. 
*/ if (rt->dst.dev == dev) { + skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); goto bridged_dnat; } @@ -413,6 +414,7 @@ bridged_dnat: kfree_skb(skb); return 0; } + skb_dst_drop(skb); skb_dst_set_noref(skb, &rt->dst); } diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index e4e0c836c3f5..6b07f30675bb 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c @@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc kfree_skb(skb); return 0; } + skb_dst_drop(skb); skb_dst_set_noref(skb, &rt->dst); } diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 07fa76080512..74fdd8105dca 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -384,7 +384,7 @@ int br_sysfs_addif(struct net_bridge_port *p) return err; } - strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ); + strscpy(p->sysfs_name, p->dev->name, IFNAMSIZ); return sysfs_create_link(br->ifobj, &p->kobj, p->sysfs_name); } @@ -406,7 +406,7 @@ int br_sysfs_renameif(struct net_bridge_port *p) netdev_notice(br->dev, "unable to rename link %s to %s", p->sysfs_name, p->dev->name); else - strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ); + strscpy(p->sysfs_name, p->dev->name, IFNAMSIZ); return err; } diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index 1a11064f9990..8f19253024b0 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -36,18 +36,10 @@ static struct ebt_replace_kernel initial_table = { .entries = (char *)&initial_chain, }; -static int check(const struct ebt_table_info *info, unsigned int valid_hooks) -{ - if (valid_hooks & ~(1 << NF_BR_BROUTING)) - return -EINVAL; - return 0; -} - static const struct ebt_table broute_table = { .name = "broute", .table = &initial_table, .valid_hooks = 1 << NF_BR_BROUTING, - .check = check, .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index cb949436bc0e..278f324e6752 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = { .entries = (char *)initial_chains, }; -static int check(const struct ebt_table_info *info, unsigned int valid_hooks) -{ - if (valid_hooks & ~FILTER_VALID_HOOKS) - return -EINVAL; - return 0; -} - static const struct ebt_table frame_filter = { .name = "filter", .table = &initial_table, .valid_hooks = FILTER_VALID_HOOKS, - .check = check, .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 5ee0531ae506..9066f7f376d5 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = { .entries = (char *)initial_chains, }; -static int check(const struct ebt_table_info *info, unsigned int valid_hooks) -{ - if (valid_hooks & ~NAT_VALID_HOOKS) - return -EINVAL; - return 0; -} - static const struct ebt_table frame_nat = { .name = "nat", .table = &initial_table, .valid_hooks = NAT_VALID_HOOKS, - .check = check, .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index f2dbefb61ce8..8f6639e095a0 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1040,8 +1040,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, goto free_iterate; } - /* the table doesn't like it 
*/ - if (t->check && (ret = t->check(newinfo, repl->valid_hooks))) + if (repl->valid_hooks != t->valid_hooks) goto free_unlock; if (repl->num_counters && repl->num_counters != t->private->nentries) { @@ -1231,11 +1230,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table, if (ret != 0) goto free_chainstack; - if (table->check && table->check(newinfo, table->valid_hooks)) { - ret = -EINVAL; - goto free_chainstack; - } - table->private = newinfo; rwlock_init(&table->lock); mutex_lock(&ebt_mutex); @@ -1446,7 +1440,7 @@ static inline int ebt_obj_to_user(char __user *um, const char *_name, /* ebtables expects 31 bytes long names but xt_match names are 29 bytes * long. Copy 29 bytes and fill remaining bytes with zeroes. */ - strlcpy(name, _name, sizeof(name)); + strscpy(name, _name, sizeof(name)); if (copy_to_user(um, name, EBT_EXTENSION_MAXNAMELEN) || put_user(revision, (u8 __user *)(um + EBT_EXTENSION_MAXNAMELEN)) || put_user(datasize, (int __user *)(um + EBT_EXTENSION_MAXNAMELEN + 1)) || diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 52dd0b6835bc..6a0cba4fc29f 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -342,7 +342,7 @@ int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, mutex_lock(&caifdevs->lock); list_add_rcu(&caifd->list, &caifdevs->list); - strlcpy(caifd->layer.name, dev->name, + strscpy(caifd->layer.name, dev->name, sizeof(caifd->layer.name)); caifd->layer.transmit = transmit; res = cfcnfg_add_phy_layer(cfg, diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index 4be6b04879a1..ebc202ffdd8d 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c @@ -184,7 +184,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, dev_add_pack(&caif_usb_type); pack_added = true; - strlcpy(layer->name, dev->name, sizeof(layer->name)); + strscpy(layer->name, dev->name, sizeof(layer->name)); return 0; err: diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index 23267c8db7c4..52509e185960 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c @@ -268,14 +268,14 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg, case CAIFPROTO_RFM: l->linktype = CFCTRL_SRV_RFM; l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; - strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, + strscpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, sizeof(l->u.rfm.volume)); break; case CAIFPROTO_UTIL: l->linktype = CFCTRL_SRV_UTIL; l->endpoint = 0x00; l->chtype = 0x00; - strlcpy(l->u.utility.name, s->sockaddr.u.util.service, + strscpy(l->u.utility.name, s->sockaddr.u.util.service, sizeof(l->u.utility.name)); caif_assert(sizeof(l->u.utility.name) > 10); l->u.utility.paramlen = s->param.size; diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 2809cbd6b7f7..cc405d8c7c30 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -258,7 +258,7 @@ int cfctrl_linkup_request(struct cflayer *layer, tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs); cfpkt_add_body(pkt, &tmp16, 2); memset(utility_name, 0, sizeof(utility_name)); - strlcpy(utility_name, param->u.utility.name, + strscpy(utility_name, param->u.utility.name, UTILITY_NAME_LENGTH); cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH); tmp8 = param->u.utility.paramlen; diff --git a/net/can/af_can.c b/net/can/af_can.c index 1fb49d51b25d..9503ab10f9b8 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -199,27 +199,26 @@ static int can_create(struct net *net, struct socket *sock, int protocol, int can_send(struct sk_buff *skb, int loop) { 
struct sk_buff *newskb = NULL; - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats; int err = -EINVAL; - if (skb->len == CAN_MTU) { + if (can_is_canxl_skb(skb)) { + skb->protocol = htons(ETH_P_CANXL); + } else if (can_is_can_skb(skb)) { skb->protocol = htons(ETH_P_CAN); - if (unlikely(cfd->len > CAN_MAX_DLEN)) - goto inval_skb; - } else if (skb->len == CANFD_MTU) { + } else if (can_is_canfd_skb(skb)) { + struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + skb->protocol = htons(ETH_P_CANFD); - if (unlikely(cfd->len > CANFD_MAX_DLEN)) - goto inval_skb; + + /* set CAN FD flag for CAN FD frames by default */ + cfd->flags |= CANFD_FDF; } else { goto inval_skb; } - /* Make sure the CAN frame can pass the selected CAN netdevice. - * As structs can_frame and canfd_frame are similar, we can provide - * CAN FD frames to legacy CAN drivers as long as the length is <= 8 - */ - if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) { + /* Make sure the CAN frame can pass the selected CAN netdevice. */ + if (unlikely(skb->len > skb->dev->mtu)) { err = -EMSGSIZE; goto inval_skb; } @@ -678,53 +677,46 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev) static int can_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - - if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) { + if (unlikely(dev->type != ARPHRD_CAN || (!can_is_can_skb(skb)))) { pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n", dev->type, skb->len); - goto free_skb; - } - /* This check is made separately since cfd->len would be uninitialized if skb->len = 0. */ - if (unlikely(cfd->len > CAN_MAX_DLEN)) { - pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d, datalen %d\n", - dev->type, skb->len, cfd->len); - goto free_skb; + kfree_skb(skb); + return NET_RX_DROP; } can_receive(skb, dev); return NET_RX_SUCCESS; - -free_skb: - kfree_skb(skb); - return NET_RX_DROP; } static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - - if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) { + if (unlikely(dev->type != ARPHRD_CAN || (!can_is_canfd_skb(skb)))) { pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n", dev->type, skb->len); - goto free_skb; - } - /* This check is made separately since cfd->len would be uninitialized if skb->len = 0. 
*/ - if (unlikely(cfd->len > CANFD_MAX_DLEN)) { - pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d, datalen %d\n", - dev->type, skb->len, cfd->len); - goto free_skb; + kfree_skb(skb); + return NET_RX_DROP; } can_receive(skb, dev); return NET_RX_SUCCESS; +} -free_skb: - kfree_skb(skb); - return NET_RX_DROP; +static int canxl_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + if (unlikely(dev->type != ARPHRD_CAN || (!can_is_canxl_skb(skb)))) { + pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n", + dev->type, skb->len); + + kfree_skb(skb); + return NET_RX_DROP; + } + + can_receive(skb, dev); + return NET_RX_SUCCESS; } /* af_can protocol functions */ @@ -851,6 +843,11 @@ static struct packet_type canfd_packet __read_mostly = { .func = canfd_rcv, }; +static struct packet_type canxl_packet __read_mostly = { + .type = cpu_to_be16(ETH_P_CANXL), + .func = canxl_rcv, +}; + static const struct net_proto_family can_family_ops = { .family = PF_CAN, .create = can_create, @@ -890,6 +887,7 @@ static __init int can_init(void) dev_add_pack(&can_packet); dev_add_pack(&canfd_packet); + dev_add_pack(&canxl_packet); return 0; diff --git a/net/can/bcm.c b/net/can/bcm.c index e60161bec850..e5d179ba6f7d 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -648,8 +648,13 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) return; /* make sure to handle the correct frame type (CAN / CAN FD) */ - if (skb->len != op->cfsiz) - return; + if (op->flags & CAN_FD_FRAME) { + if (!can_is_canfd_skb(skb)) + return; + } else { + if (!can_is_can_skb(skb)) + return; + } /* disable timeout */ hrtimer_cancel(&op->timer); diff --git a/net/can/gw.c b/net/can/gw.c index 1ea4cc527db3..23a3d89cad81 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -463,10 +463,10 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) /* process strictly Classic CAN or CAN FD frames */ if (gwj->flags & CGW_FLAGS_CAN_FD) { - if (skb->len != CANFD_MTU) + if (!can_is_canfd_skb(skb)) return; } else { - if (skb->len != CAN_MTU) + if (!can_is_can_skb(skb)) return; } diff --git a/net/can/isotp.c b/net/can/isotp.c index 43a27d19cdac..a9d1357f8489 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -669,7 +669,7 @@ static void isotp_rcv(struct sk_buff *skb, void *data) if (cf->len <= CAN_MAX_DLEN) { isotp_rcv_sf(sk, cf, SF_PCI_SZ4 + ae, skb, sf_dl); } else { - if (skb->len == CANFD_MTU) { + if (can_is_canfd_skb(skb)) { /* We have a CAN FD frame and CAN_DL is greater than 8: * Only frames with the SF_DL == 0 ESC value are valid. * diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c index 8452b0fbb78c..144c86b0e3ff 100644 --- a/net/can/j1939/main.c +++ b/net/can/j1939/main.c @@ -42,6 +42,10 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data) struct j1939_sk_buff_cb *skcb, *iskcb; struct can_frame *cf; + /* make sure we only get Classical CAN frames */ + if (!can_is_can_skb(iskb)) + return; + /* create a copy of the skb * j1939 only delivers the real data bytes, * the header goes into sockaddr. 
diff --git a/net/can/raw.c b/net/can/raw.c index d1bd9cc51ebe..3eb7d3e2b541 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -50,6 +50,7 @@ #include <linux/skbuff.h> #include <linux/can.h> #include <linux/can/core.h> +#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */ #include <linux/can/skb.h> #include <linux/can/raw.h> #include <net/sock.h> @@ -87,6 +88,7 @@ struct raw_sock { int loopback; int recv_own_msgs; int fd_frames; + int xl_frames; int join_filters; int count; /* number of active filters */ struct can_filter dfilter; /* default/single filter */ @@ -129,21 +131,21 @@ static void raw_rcv(struct sk_buff *oskb, void *data) if (!ro->recv_own_msgs && oskb->sk == sk) return; - /* do not pass non-CAN2.0 frames to a legacy socket */ - if (!ro->fd_frames && oskb->len != CAN_MTU) + /* make sure to not pass oversized frames to the socket */ + if ((can_is_canfd_skb(oskb) && !ro->fd_frames && !ro->xl_frames) || + (can_is_canxl_skb(oskb) && !ro->xl_frames)) return; /* eliminate multiple filter matches for the same skb */ if (this_cpu_ptr(ro->uniq)->skb == oskb && this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) { - if (ro->join_filters) { - this_cpu_inc(ro->uniq->join_rx_count); - /* drop frame until all enabled filters matched */ - if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count) - return; - } else { + if (!ro->join_filters) + return; + + this_cpu_inc(ro->uniq->join_rx_count); + /* drop frame until all enabled filters matched */ + if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count) return; - } } else { this_cpu_ptr(ro->uniq)->skb = oskb; this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt; @@ -346,6 +348,7 @@ static int raw_init(struct sock *sk) ro->loopback = 1; ro->recv_own_msgs = 0; ro->fd_frames = 0; + ro->xl_frames = 0; ro->join_filters = 0; /* alloc_percpu provides zero'ed memory */ @@ -669,6 +672,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, break; + case CAN_RAW_XL_FRAMES: + if (optlen != sizeof(ro->xl_frames)) + return -EINVAL; + + if (copy_from_sockptr(&ro->xl_frames, optval, optlen)) + return -EFAULT; + + break; + case CAN_RAW_JOIN_FILTERS: if (optlen != sizeof(ro->join_filters)) return -EINVAL; @@ -751,6 +763,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname, val = &ro->fd_frames; break; + case CAN_RAW_XL_FRAMES: + if (len > sizeof(int)) + len = sizeof(int); + val = &ro->xl_frames; + break; + case CAN_RAW_JOIN_FILTERS: if (len > sizeof(int)) len = sizeof(int); @@ -776,7 +794,11 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) struct sk_buff *skb; struct net_device *dev; int ifindex; - int err; + int err = -EINVAL; + + /* check for valid CAN frame sizes */ + if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU) + return -EINVAL; if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name); @@ -796,15 +818,6 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (!dev) return -ENXIO; - err = -EINVAL; - if (ro->fd_frames && dev->mtu == CANFD_MTU) { - if (unlikely(size != CANFD_MTU && size != CAN_MTU)) - goto put_dev; - } else { - if (unlikely(size != CAN_MTU)) - goto put_dev; - } - skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv), msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) @@ -814,10 +827,27 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; + /* fill the skb before testing for valid 
CAN frames */ err = memcpy_from_msg(skb_put(skb, size), msg, size); if (err < 0) goto free_skb; + err = -EINVAL; + if (ro->xl_frames && can_is_canxl_dev_mtu(dev->mtu)) { + /* CAN XL, CAN FD and Classical CAN */ + if (!can_is_canxl_skb(skb) && !can_is_canfd_skb(skb) && + !can_is_can_skb(skb)) + goto free_skb; + } else if (ro->fd_frames && dev->mtu == CANFD_MTU) { + /* CAN FD and Classical CAN */ + if (!can_is_canfd_skb(skb) && !can_is_can_skb(skb)) + goto free_skb; + } else { + /* Classical CAN */ + if (!can_is_can_skb(skb)) + goto free_skb; + } + sockcm_init(&sockc, sk); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); @@ -942,12 +972,20 @@ static __init int raw_module_init(void) pr_info("can: raw protocol\n"); + err = register_netdevice_notifier(&canraw_notifier); + if (err) + return err; + err = can_proto_register(&raw_can_proto); - if (err < 0) + if (err < 0) { pr_err("can: registration of raw protocol failed\n"); - else - register_netdevice_notifier(&canraw_notifier); + goto register_proto_failed; + } + + return 0; +register_proto_failed: + unregister_netdevice_notifier(&canraw_notifier); return err; } diff --git a/net/core/.gitignore b/net/core/.gitignore deleted file mode 100644 index df1e74372cce..000000000000 --- a/net/core/.gitignore +++ /dev/null @@ -1 +0,0 @@ -dropreason_str.c diff --git a/net/core/Makefile b/net/core/Makefile index e8ce3bd283a6..5857cec87b83 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -5,7 +5,7 @@ obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \ gen_stats.o gen_estimator.o net_namespace.o secure_seq.o \ - flow_dissector.o dropreason_str.o + flow_dissector.o obj-$(CONFIG_SYSCTL) += sysctl_net_core.o @@ -40,23 +40,3 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o obj-$(CONFIG_BPF_SYSCALL) += sock_map.o obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o obj-$(CONFIG_OF) += of_net.o - -clean-files := dropreason_str.c - -quiet_cmd_dropreason_str = GEN $@ -cmd_dropreason_str = awk -F ',' 'BEGIN{ print "\#include <net/dropreason.h>\n"; \ - print "const char * const drop_reasons[] = {" }\ - /^enum skb_drop/ { dr=1; }\ - /^\};/ { dr=0; }\ - /^\tSKB_DROP_REASON_/ {\ - if (dr) {\ - sub(/\tSKB_DROP_REASON_/, "", $$1);\ - printf "\t[SKB_DROP_REASON_%s] = \"%s\",\n", $$1, $$1;\ - }\ - }\ - END{ print "};" }' $< > $@ - -$(obj)/dropreason_str.c: $(srctree)/include/net/dropreason.h - $(call cmd,dropreason_str) - -$(obj)/dropreason_str.o: $(obj)/dropreason_str.c diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 1b7f385643b4..94374d529ea4 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -310,11 +310,12 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk) static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap, void *owner, u32 size) { + int optmem_max = READ_ONCE(sysctl_optmem_max); struct sock *sk = (struct sock *)owner; /* same check as in sock_kmalloc() */ - if (size <= sysctl_optmem_max && - atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { + if (size <= optmem_max && + atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { atomic_add(size, &sk->sk_omem_alloc); return 0; } diff --git a/net/core/datagram.c b/net/core/datagram.c index 7255531f63ae..e4ff2db40c98 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -677,7 +677,7 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, page_ref_sub(last_head, refs); refs = 0; } - skb_fill_page_desc(skb, frag++, head, start, size); + skb_fill_page_desc_noacc(skb, frag++, head, 
start, size); } if (refs) page_ref_sub(last_head, refs); diff --git a/net/core/dev.c b/net/core/dev.c index 716df64fcfa5..d66c73c1c734 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1100,7 +1100,7 @@ static int dev_alloc_name_ns(struct net *net, BUG_ON(!net); ret = __dev_alloc_name(net, name, buf); if (ret >= 0) - strlcpy(dev->name, buf, IFNAMSIZ); + strscpy(dev->name, buf, IFNAMSIZ); return ret; } @@ -1137,7 +1137,7 @@ static int dev_get_valid_name(struct net *net, struct net_device *dev, else if (netdev_name_in_use(net, name)) return -EEXIST; else if (dev->name != name) - strlcpy(dev->name, name, IFNAMSIZ); + strscpy(dev->name, name, IFNAMSIZ); return 0; } @@ -4624,7 +4624,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) struct softnet_data *sd; unsigned int old_flow, new_flow; - if (qlen < (netdev_max_backlog >> 1)) + if (qlen < (READ_ONCE(netdev_max_backlog) >> 1)) return false; sd = this_cpu_ptr(&softnet_data); @@ -4672,7 +4672,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, if (!netif_running(skb->dev)) goto drop; qlen = skb_queue_len(&sd->input_pkt_queue); - if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { + if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) { if (qlen) { enqueue: __skb_queue_tail(&sd->input_pkt_queue, skb); @@ -4928,7 +4928,7 @@ static int netif_rx_internal(struct sk_buff *skb) { int ret; - net_timestamp_check(netdev_tstamp_prequeue, skb); + net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); trace_netif_rx(skb); @@ -5281,7 +5281,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, int ret = NET_RX_DROP; __be16 type; - net_timestamp_check(!netdev_tstamp_prequeue, skb); + net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb); trace_netif_receive_skb(skb); @@ -5664,7 +5664,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb) { int ret; - net_timestamp_check(netdev_tstamp_prequeue, skb); + net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; @@ -5694,7 +5694,7 @@ void netif_receive_skb_list_internal(struct list_head *head) INIT_LIST_HEAD(&sublist); list_for_each_entry_safe(skb, next, head, list) { - net_timestamp_check(netdev_tstamp_prequeue, skb); + net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); skb_list_del_init(skb); if (!skb_defer_rx_timestamp(skb)) list_add_tail(&skb->list, &sublist); @@ -5918,7 +5918,7 @@ static int process_backlog(struct napi_struct *napi, int quota) net_rps_action_and_irq_enable(sd); } - napi->weight = dev_rx_weight; + napi->weight = READ_ONCE(dev_rx_weight); while (again) { struct sk_buff *skb; @@ -6665,8 +6665,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + - usecs_to_jiffies(netdev_budget_usecs); - int budget = netdev_budget; + usecs_to_jiffies(READ_ONCE(netdev_budget_usecs)); + int budget = READ_ONCE(netdev_budget); LIST_HEAD(list); LIST_HEAD(repoll); @@ -10284,7 +10284,7 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list) return dev; if (time_after(jiffies, warning_time + - netdev_unregister_timeout_secs * HZ)) { + READ_ONCE(netdev_unregister_timeout_secs) * HZ)) { list_for_each_entry(dev, list, todo_list) { pr_emerg("unregister_netdevice: waiting for %s to become free. 
Usage count = %d\n", dev->name, netdev_refcnt_read(dev)); @@ -10370,9 +10370,7 @@ void netdev_run_todo(void) BUG_ON(!list_empty(&dev->ptype_specific)); WARN_ON(rcu_access_pointer(dev->ip_ptr)); WARN_ON(rcu_access_pointer(dev->ip6_ptr)); -#if IS_ENABLED(CONFIG_DECNET) - WARN_ON(dev->dn_ptr); -#endif + if (dev->priv_destructor) dev->priv_destructor(dev); if (dev->needs_free_netdev) diff --git a/net/core/devlink.c b/net/core/devlink.c index b50bcc18b8d9..7776dc82f88d 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1710,7 +1710,7 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, struct devlink *devlink = info->user_ptr[0]; u32 count; - if (!info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_SPLIT_COUNT)) return -EINVAL; if (!devlink->ops->port_split) return -EOPNOTSUPP; @@ -1838,7 +1838,7 @@ static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb, if (!devlink->ops->port_del) return -EOPNOTSUPP; - if (!info->attrs[DEVLINK_ATTR_PORT_INDEX]) { + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_INDEX)) { NL_SET_ERR_MSG_MOD(extack, "Port index is not specified"); return -EINVAL; } @@ -2690,7 +2690,7 @@ static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb, if (err) return err; - if (!info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_POOL_SIZE)) return -EINVAL; size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]); @@ -2900,7 +2900,7 @@ static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb, if (err) return err; - if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD)) return -EINVAL; threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]); @@ -3156,7 +3156,7 @@ static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb, if (err) return err; - if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD)) return -EINVAL; threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]); @@ -3845,7 +3845,7 @@ static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb, struct devlink_dpipe_table *table; const char *table_name; - if (!info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME)) return -EINVAL; table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]); @@ -4029,8 +4029,9 @@ static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb, const char *table_name; bool counters_enable; - if (!info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME] || - !info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME) || + GENL_REQ_ATTR_CHECK(info, + DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED)) return -EINVAL; table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]); @@ -4119,8 +4120,8 @@ static int devlink_nl_cmd_resource_set(struct sk_buff *skb, u64 size; int err; - if (!info->attrs[DEVLINK_ATTR_RESOURCE_ID] || - !info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_ID) || + GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_SIZE)) return -EINVAL; resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]); @@ -4742,10 +4743,76 @@ void devlink_flash_update_timeout_notify(struct devlink *devlink, } EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify); +struct devlink_info_req { + struct sk_buff *msg; + void (*version_cb)(const char *version_name, + enum devlink_info_version_type 
version_type, + void *version_cb_priv); + void *version_cb_priv; +}; + +struct devlink_flash_component_lookup_ctx { + const char *lookup_name; + bool lookup_name_found; +}; + +static void +devlink_flash_component_lookup_cb(const char *version_name, + enum devlink_info_version_type version_type, + void *version_cb_priv) +{ + struct devlink_flash_component_lookup_ctx *lookup_ctx = version_cb_priv; + + if (version_type != DEVLINK_INFO_VERSION_TYPE_COMPONENT || + lookup_ctx->lookup_name_found) + return; + + lookup_ctx->lookup_name_found = + !strcmp(lookup_ctx->lookup_name, version_name); +} + +static int devlink_flash_component_get(struct devlink *devlink, + struct nlattr *nla_component, + const char **p_component, + struct netlink_ext_ack *extack) +{ + struct devlink_flash_component_lookup_ctx lookup_ctx = {}; + struct devlink_info_req req = {}; + const char *component; + int ret; + + if (!nla_component) + return 0; + + component = nla_data(nla_component); + + if (!devlink->ops->info_get) { + NL_SET_ERR_MSG_ATTR(extack, nla_component, + "component update is not supported by this device"); + return -EOPNOTSUPP; + } + + lookup_ctx.lookup_name = component; + req.version_cb = devlink_flash_component_lookup_cb; + req.version_cb_priv = &lookup_ctx; + + ret = devlink->ops->info_get(devlink, &req, NULL); + if (ret) + return ret; + + if (!lookup_ctx.lookup_name_found) { + NL_SET_ERR_MSG_ATTR(extack, nla_component, + "selected component is not supported by this device"); + return -EINVAL; + } + *p_component = component; + return 0; +} + static int devlink_nl_cmd_flash_update(struct sk_buff *skb, struct genl_info *info) { - struct nlattr *nla_component, *nla_overwrite_mask, *nla_file_name; + struct nlattr *nla_overwrite_mask, *nla_file_name; struct devlink_flash_update_params params = {}; struct devlink *devlink = info->user_ptr[0]; const char *file_name; @@ -4755,20 +4822,16 @@ static int devlink_nl_cmd_flash_update(struct sk_buff *skb, if (!devlink->ops->flash_update) return -EOPNOTSUPP; - if (!info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME)) return -EINVAL; - supported_params = devlink->ops->supported_flash_update_params; + ret = devlink_flash_component_get(devlink, + info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT], + ¶ms.component, info->extack); + if (ret) + return ret; - nla_component = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT]; - if (nla_component) { - if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT)) { - NL_SET_ERR_MSG_ATTR(info->extack, nla_component, - "component update is not supported by this device"); - return -EOPNOTSUPP; - } - params.component = nla_data(nla_component); - } + supported_params = devlink->ops->supported_flash_update_params; nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK]; if (nla_overwrite_mask) { @@ -4936,10 +4999,8 @@ static int devlink_nl_cmd_selftests_run(struct sk_buff *skb, if (!devlink->ops->selftest_run || !devlink->ops->selftest_check) return -EOPNOTSUPP; - if (!info->attrs[DEVLINK_ATTR_SELFTESTS]) { - NL_SET_ERR_MSG_MOD(info->extack, "selftest required"); + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SELFTESTS)) return -EINVAL; - } attrs = info->attrs[DEVLINK_ATTR_SELFTESTS]; @@ -5393,7 +5454,7 @@ static int devlink_param_type_get_from_info(struct genl_info *info, enum devlink_param_type *param_type) { - if (!info->attrs[DEVLINK_ATTR_PARAM_TYPE]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_TYPE)) return -EINVAL; switch 
(nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) { @@ -5470,7 +5531,7 @@ devlink_param_get_from_info(struct list_head *param_list, { char *param_name; - if (!info->attrs[DEVLINK_ATTR_PARAM_NAME]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_NAME)) return NULL; param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]); @@ -5536,7 +5597,7 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink, return err; } - if (!info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_VALUE_CMODE)) return -EINVAL; cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]); if (!devlink_param_cmode_is_supported(param, cmode)) @@ -5574,89 +5635,22 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { - struct devlink_param_item *param_item; - struct devlink_port *devlink_port; - struct devlink *devlink; - int start = cb->args[0]; - unsigned long index; - int idx = 0; - int err = 0; - - devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) { - devl_lock(devlink); - list_for_each_entry(devlink_port, &devlink->port_list, list) { - list_for_each_entry(param_item, - &devlink_port->param_list, list) { - if (idx < start) { - idx++; - continue; - } - err = devlink_nl_param_fill(msg, - devlink_port->devlink, - devlink_port->index, param_item, - DEVLINK_CMD_PORT_PARAM_GET, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI); - if (err == -EOPNOTSUPP) { - err = 0; - } else if (err) { - devl_unlock(devlink); - devlink_put(devlink); - goto out; - } - idx++; - } - } - devl_unlock(devlink); - devlink_put(devlink); - } -out: - if (err != -EMSGSIZE) - return err; - - cb->args[0] = idx; + NL_SET_ERR_MSG_MOD(cb->extack, "Port params are not supported"); return msg->len; } static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_port *devlink_port = info->user_ptr[1]; - struct devlink_param_item *param_item; - struct sk_buff *msg; - int err; - - param_item = devlink_param_get_from_info(&devlink_port->param_list, - info); - if (!param_item) - return -EINVAL; - - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; - - err = devlink_nl_param_fill(msg, devlink_port->devlink, - devlink_port->index, param_item, - DEVLINK_CMD_PORT_PARAM_GET, - info->snd_portid, info->snd_seq, 0); - if (err) { - nlmsg_free(msg); - return err; - } - - return genlmsg_reply(msg, info); + NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported"); + return -EINVAL; } static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_port *devlink_port = info->user_ptr[1]; - - return __devlink_nl_cmd_param_set_doit(devlink_port->devlink, - devlink_port->index, - &devlink_port->param_list, info, - DEVLINK_CMD_PORT_PARAM_NEW); + NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported"); + return -EINVAL; } static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg, @@ -6056,7 +6050,7 @@ static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb, unsigned int index; int err; - if (!info->attrs[DEVLINK_ATTR_REGION_NAME]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME)) return -EINVAL; if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) { @@ -6189,8 +6183,8 @@ static int devlink_nl_cmd_region_del(struct sk_buff *skb, unsigned int index; u32 snapshot_id; - if (!info->attrs[DEVLINK_ATTR_REGION_NAME] || - 
!info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME) || + GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_SNAPSHOT_ID)) return -EINVAL; region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]); @@ -6238,7 +6232,7 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info) u8 *data; int err; - if (!info->attrs[DEVLINK_ATTR_REGION_NAME]) { + if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME)) { NL_SET_ERR_MSG_MOD(info->extack, "No region name provided"); return -EINVAL; } @@ -6553,18 +6547,18 @@ out_unlock: return err; } -struct devlink_info_req { - struct sk_buff *msg; -}; - int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name) { + if (!req->msg) + return 0; return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name); } EXPORT_SYMBOL_GPL(devlink_info_driver_name_put); int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn) { + if (!req->msg) + return 0; return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn); } EXPORT_SYMBOL_GPL(devlink_info_serial_number_put); @@ -6572,6 +6566,8 @@ EXPORT_SYMBOL_GPL(devlink_info_serial_number_put); int devlink_info_board_serial_number_put(struct devlink_info_req *req, const char *bsn) { + if (!req->msg) + return 0; return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, bsn); } @@ -6579,11 +6575,19 @@ EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put); static int devlink_info_version_put(struct devlink_info_req *req, int attr, const char *version_name, - const char *version_value) + const char *version_value, + enum devlink_info_version_type version_type) { struct nlattr *nest; int err; + if (req->version_cb) + req->version_cb(version_name, version_type, + req->version_cb_priv); + + if (!req->msg) + return 0; + nest = nla_nest_start_noflag(req->msg, attr); if (!nest) return -EMSGSIZE; @@ -6612,7 +6616,8 @@ int devlink_info_version_fixed_put(struct devlink_info_req *req, const char *version_value) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED, - version_name, version_value); + version_name, version_value, + DEVLINK_INFO_VERSION_TYPE_NONE); } EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put); @@ -6621,25 +6626,49 @@ int devlink_info_version_stored_put(struct devlink_info_req *req, const char *version_value) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED, - version_name, version_value); + version_name, version_value, + DEVLINK_INFO_VERSION_TYPE_NONE); } EXPORT_SYMBOL_GPL(devlink_info_version_stored_put); +int devlink_info_version_stored_put_ext(struct devlink_info_req *req, + const char *version_name, + const char *version_value, + enum devlink_info_version_type version_type) +{ + return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED, + version_name, version_value, + version_type); +} +EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext); + int devlink_info_version_running_put(struct devlink_info_req *req, const char *version_name, const char *version_value) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING, - version_name, version_value); + version_name, version_value, + DEVLINK_INFO_VERSION_TYPE_NONE); } EXPORT_SYMBOL_GPL(devlink_info_version_running_put); +int devlink_info_version_running_put_ext(struct devlink_info_req *req, + const char *version_name, + const char *version_value, + enum devlink_info_version_type version_type) +{ + return devlink_info_version_put(req, 
DEVLINK_ATTR_INFO_VERSION_RUNNING, + version_name, version_value, + version_type); +} +EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext); + static int devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink, enum devlink_command cmd, u32 portid, u32 seq, int flags, struct netlink_ext_ack *extack) { - struct devlink_info_req req; + struct devlink_info_req req = {}; void *hdr; int err; @@ -9513,6 +9542,7 @@ static struct genl_family devlink_nl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = devlink_nl_ops, .n_small_ops = ARRAY_SIZE(devlink_nl_ops), + .resv_start_op = DEVLINK_CMD_SELFTESTS_RUN + 1, .mcgrps = devlink_nl_mcgrps, .n_mcgrps = ARRAY_SIZE(devlink_nl_mcgrps), }; @@ -9846,7 +9876,6 @@ int devl_port_register(struct devlink *devlink, INIT_LIST_HEAD(&devlink_port->reporter_list); mutex_init(&devlink_port->reporters_lock); list_add_tail(&devlink_port->list, &devlink->port_list); - INIT_LIST_HEAD(&devlink_port->param_list); INIT_LIST_HEAD(&devlink_port->region_list); INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn); @@ -12306,8 +12335,8 @@ EXPORT_SYMBOL_GPL(devl_trap_policers_unregister); static void __devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len) { + struct devlink_info_req req = {}; const struct nlattr *nlattr; - struct devlink_info_req req; struct sk_buff *msg; int rem, err; diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 75501e1bdd25..f084a4a6b7ab 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -464,7 +464,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink, goto out; hw_entry = &hw_entries->entries[hw_entries->num_entries]; - strlcpy(hw_entry->trap_name, metadata->trap_name, + strscpy(hw_entry->trap_name, metadata->trap_name, NET_DM_MAX_HW_TRAP_NAME_LEN - 1); hw_entry->count = 1; hw_entries->num_entries++; @@ -1645,6 +1645,7 @@ static struct genl_family net_drop_monitor_family __ro_after_init = { .module = THIS_MODULE, .small_ops = dropmon_ops, .n_small_ops = ARRAY_SIZE(dropmon_ops), + .resv_start_op = NET_DM_CMD_STATS_GET + 1, .mcgrps = dropmon_mcgrps, .n_mcgrps = ARRAY_SIZE(dropmon_mcgrps), }; diff --git a/net/core/filter.c b/net/core/filter.c index e8508aaafd27..e872f45399b0 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1214,10 +1214,11 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) { u32 filter_size = bpf_prog_size(fp->prog->len); + int optmem_max = READ_ONCE(sysctl_optmem_max); /* same check as in sock_kmalloc() */ - if (filter_size <= sysctl_optmem_max && - atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) { + if (filter_size <= optmem_max && + atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) { atomic_add(filter_size, &sk->sk_omem_alloc); return true; } @@ -1548,7 +1549,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) if (IS_ERR(prog)) return PTR_ERR(prog); - if (bpf_prog_size(prog->len) > sysctl_optmem_max) + if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) err = -ENOMEM; else err = reuseport_attach_prog(sk, prog); @@ -1615,7 +1616,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) } } else { /* BPF_PROG_TYPE_SOCKET_FILTER */ - if (bpf_prog_size(prog->len) > sysctl_optmem_max) { + if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) { err = -ENOMEM; goto err_prog_put; } @@ -3009,7 +3010,7 @@ BPF_CALL_0(bpf_get_cgroup_classid_curr) return 
__task_get_classid(current); } -static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = { +const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = { .func = bpf_get_cgroup_classid_curr, .gpl_only = false, .ret_type = RET_INTEGER, @@ -4488,7 +4489,8 @@ BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key void *to_orig = to; int err; - if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) { + if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6 | + BPF_F_TUNINFO_FLAGS)))) { err = -EINVAL; goto err_clear; } @@ -4520,7 +4522,10 @@ set_compat: to->tunnel_id = be64_to_cpu(info->key.tun_id); to->tunnel_tos = info->key.tos; to->tunnel_ttl = info->key.ttl; - to->tunnel_ext = 0; + if (flags & BPF_F_TUNINFO_FLAGS) + to->tunnel_flags = info->key.tun_flags; + else + to->tunnel_ext = 0; if (flags & BPF_F_TUNINFO_IPV6) { memcpy(to->remote_ipv6, &info->key.u.ipv6.src, @@ -5013,359 +5018,259 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = { .arg1_type = ARG_PTR_TO_CTX, }; -static int __bpf_setsockopt(struct sock *sk, int level, int optname, - char *optval, int optlen) -{ - char devname[IFNAMSIZ]; - int val, valbool; - struct net *net; - int ifindex; - int ret = 0; - - if (!sk_fullsock(sk)) +static int sol_socket_sockopt(struct sock *sk, int optname, + char *optval, int *optlen, + bool getopt) +{ + switch (optname) { + case SO_REUSEADDR: + case SO_SNDBUF: + case SO_RCVBUF: + case SO_KEEPALIVE: + case SO_PRIORITY: + case SO_REUSEPORT: + case SO_RCVLOWAT: + case SO_MARK: + case SO_MAX_PACING_RATE: + case SO_BINDTOIFINDEX: + case SO_TXREHASH: + if (*optlen != sizeof(int)) + return -EINVAL; + break; + case SO_BINDTODEVICE: + break; + default: return -EINVAL; + } - if (level == SOL_SOCKET) { - if (optlen != sizeof(int) && optname != SO_BINDTODEVICE) + if (getopt) { + if (optname == SO_BINDTODEVICE) return -EINVAL; - val = *((int *)optval); - valbool = val ? 1 : 0; - - /* Only some socketops are supported */ - switch (optname) { - case SO_RCVBUF: - val = min_t(u32, val, sysctl_rmem_max); - val = min_t(int, val, INT_MAX / 2); - sk->sk_userlocks |= SOCK_RCVBUF_LOCK; - WRITE_ONCE(sk->sk_rcvbuf, - max_t(int, val * 2, SOCK_MIN_RCVBUF)); - break; - case SO_SNDBUF: - val = min_t(u32, val, sysctl_wmem_max); - val = min_t(int, val, INT_MAX / 2); - sk->sk_userlocks |= SOCK_SNDBUF_LOCK; - WRITE_ONCE(sk->sk_sndbuf, - max_t(int, val * 2, SOCK_MIN_SNDBUF)); - break; - case SO_MAX_PACING_RATE: /* 32bit version */ - if (val != ~0U) - cmpxchg(&sk->sk_pacing_status, - SK_PACING_NONE, - SK_PACING_NEEDED); - sk->sk_max_pacing_rate = (val == ~0U) ? - ~0UL : (unsigned int)val; - sk->sk_pacing_rate = min(sk->sk_pacing_rate, - sk->sk_max_pacing_rate); - break; - case SO_PRIORITY: - sk->sk_priority = val; - break; - case SO_RCVLOWAT: - if (val < 0) - val = INT_MAX; - if (sk->sk_socket && sk->sk_socket->ops->set_rcvlowat) - ret = sk->sk_socket->ops->set_rcvlowat(sk, val); - else - WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); - break; - case SO_MARK: - if (sk->sk_mark != val) { - sk->sk_mark = val; - sk_dst_reset(sk); - } - break; - case SO_BINDTODEVICE: - optlen = min_t(long, optlen, IFNAMSIZ - 1); - strncpy(devname, optval, optlen); - devname[optlen] = 0; + return sk_getsockopt(sk, SOL_SOCKET, optname, + KERNEL_SOCKPTR(optval), + KERNEL_SOCKPTR(optlen)); + } - ifindex = 0; - if (devname[0] != '\0') { - struct net_device *dev; + return sk_setsockopt(sk, SOL_SOCKET, optname, + KERNEL_SOCKPTR(optval), *optlen); +} - ret = -ENODEV; +static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname, + char *optval, int optlen) +{ + struct tcp_sock *tp = tcp_sk(sk); + unsigned long timeout; + int val; - net = sock_net(sk); - dev = dev_get_by_name(net, devname); - if (!dev) - break; - ifindex = dev->ifindex; - dev_put(dev); - } - fallthrough; - case SO_BINDTOIFINDEX: - if (optname == SO_BINDTOIFINDEX) - ifindex = val; - ret = sock_bindtoindex(sk, ifindex, false); - break; - case SO_KEEPALIVE: - if (sk->sk_prot->keepalive) - sk->sk_prot->keepalive(sk, valbool); - sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); - break; - case SO_REUSEPORT: - sk->sk_reuseport = valbool; - break; - case SO_TXREHASH: - if (val < -1 || val > 1) { - ret = -EINVAL; - break; - } - sk->sk_txrehash = (u8)val; - break; - default: - ret = -EINVAL; - } -#ifdef CONFIG_INET - } else if (level == SOL_IP) { - if (optlen != sizeof(int) || sk->sk_family != AF_INET) - return -EINVAL; + if (optlen != sizeof(int)) + return -EINVAL; - val = *((int *)optval); - /* Only some options are supported */ - switch (optname) { - case IP_TOS: - if (val < -1 || val > 0xff) { - ret = -EINVAL; - } else { - struct inet_sock *inet = inet_sk(sk); + val = *(int *)optval; - if (val == -1) - val = 0; - inet->tos = val; - } - break; - default: - ret = -EINVAL; - } -#if IS_ENABLED(CONFIG_IPV6) - } else if (level == SOL_IPV6) { - if (optlen != sizeof(int) || sk->sk_family != AF_INET6) + /* Only some options are supported */ + switch (optname) { + case TCP_BPF_IW: + if (val <= 0 || tp->data_segs_out > tp->syn_data) + return -EINVAL; + tcp_snd_cwnd_set(tp, val); + break; + case TCP_BPF_SNDCWND_CLAMP: + if (val <= 0) + return -EINVAL; + tp->snd_cwnd_clamp = val; + tp->snd_ssthresh = val; + break; + case TCP_BPF_DELACK_MAX: + timeout = usecs_to_jiffies(val); + if (timeout > TCP_DELACK_MAX || + timeout < TCP_TIMEOUT_MIN) + return -EINVAL; + inet_csk(sk)->icsk_delack_max = timeout; + break; + case TCP_BPF_RTO_MIN: + timeout = usecs_to_jiffies(val); + if (timeout > TCP_RTO_MIN || + timeout < TCP_TIMEOUT_MIN) return -EINVAL; + inet_csk(sk)->icsk_rto_min = timeout; + break; + default: + return -EINVAL; + } - val = *((int *)optval); - /* Only some options are supported */ - switch (optname) { - case IPV6_TCLASS: - if (val < -1 || val > 0xff) { - ret = -EINVAL; - } else { - struct ipv6_pinfo *np = inet6_sk(sk); + return 0; +} - if (val == -1) - val = 0; - np->tclass = val; - } - break; - default: - ret = -EINVAL; - } -#endif - } else if (level == SOL_TCP && - sk->sk_prot->setsockopt == tcp_setsockopt) { - if (optname == TCP_CONGESTION) { - char name[TCP_CA_NAME_MAX]; +static int sol_tcp_sockopt(struct sock *sk, int optname, + char *optval, int *optlen, + bool getopt) +{ + if (sk->sk_prot->setsockopt != tcp_setsockopt) + return -EINVAL; - strncpy(name, optval, min_t(long, optlen, - TCP_CA_NAME_MAX-1)); - name[TCP_CA_NAME_MAX-1] = 0; - ret = tcp_set_congestion_control(sk, name, false, true); - } else { - struct inet_connection_sock *icsk = inet_csk(sk); + switch (optname) { + case 
TCP_NODELAY: + case TCP_MAXSEG: + case TCP_KEEPIDLE: + case TCP_KEEPINTVL: + case TCP_KEEPCNT: + case TCP_SYNCNT: + case TCP_WINDOW_CLAMP: + case TCP_THIN_LINEAR_TIMEOUTS: + case TCP_USER_TIMEOUT: + case TCP_NOTSENT_LOWAT: + case TCP_SAVE_SYN: + if (*optlen != sizeof(int)) + return -EINVAL; + break; + case TCP_CONGESTION: + if (*optlen < 2) + return -EINVAL; + break; + case TCP_SAVED_SYN: + if (*optlen < 1) + return -EINVAL; + break; + default: + if (getopt) + return -EINVAL; + return bpf_sol_tcp_setsockopt(sk, optname, optval, *optlen); + } + + if (getopt) { + if (optname == TCP_SAVED_SYN) { struct tcp_sock *tp = tcp_sk(sk); - unsigned long timeout; - if (optlen != sizeof(int)) + if (!tp->saved_syn || + *optlen > tcp_saved_syn_len(tp->saved_syn)) return -EINVAL; + memcpy(optval, tp->saved_syn->data, *optlen); + /* It cannot free tp->saved_syn here because it + * does not know if the user space still needs it. + */ + return 0; + } - val = *((int *)optval); - /* Only some options are supported */ - switch (optname) { - case TCP_BPF_IW: - if (val <= 0 || tp->data_segs_out > tp->syn_data) - ret = -EINVAL; - else - tcp_snd_cwnd_set(tp, val); - break; - case TCP_BPF_SNDCWND_CLAMP: - if (val <= 0) { - ret = -EINVAL; - } else { - tp->snd_cwnd_clamp = val; - tp->snd_ssthresh = val; - } - break; - case TCP_BPF_DELACK_MAX: - timeout = usecs_to_jiffies(val); - if (timeout > TCP_DELACK_MAX || - timeout < TCP_TIMEOUT_MIN) - return -EINVAL; - inet_csk(sk)->icsk_delack_max = timeout; - break; - case TCP_BPF_RTO_MIN: - timeout = usecs_to_jiffies(val); - if (timeout > TCP_RTO_MIN || - timeout < TCP_TIMEOUT_MIN) - return -EINVAL; - inet_csk(sk)->icsk_rto_min = timeout; - break; - case TCP_SAVE_SYN: - if (val < 0 || val > 1) - ret = -EINVAL; - else - tp->save_syn = val; - break; - case TCP_KEEPIDLE: - ret = tcp_sock_set_keepidle_locked(sk, val); - break; - case TCP_KEEPINTVL: - if (val < 1 || val > MAX_TCP_KEEPINTVL) - ret = -EINVAL; - else - tp->keepalive_intvl = val * HZ; - break; - case TCP_KEEPCNT: - if (val < 1 || val > MAX_TCP_KEEPCNT) - ret = -EINVAL; - else - tp->keepalive_probes = val; - break; - case TCP_SYNCNT: - if (val < 1 || val > MAX_TCP_SYNCNT) - ret = -EINVAL; - else - icsk->icsk_syn_retries = val; - break; - case TCP_USER_TIMEOUT: - if (val < 0) - ret = -EINVAL; - else - icsk->icsk_user_timeout = val; - break; - case TCP_NOTSENT_LOWAT: - tp->notsent_lowat = val; - sk->sk_write_space(sk); - break; - case TCP_WINDOW_CLAMP: - ret = tcp_set_window_clamp(sk, val); - break; - default: - ret = -EINVAL; - } + if (optname == TCP_CONGESTION) { + if (!inet_csk(sk)->icsk_ca_ops) + return -EINVAL; + /* BPF expects NULL-terminated tcp-cc string */ + optval[--(*optlen)] = '\0'; } -#endif - } else { - ret = -EINVAL; + + return do_tcp_getsockopt(sk, SOL_TCP, optname, + KERNEL_SOCKPTR(optval), + KERNEL_SOCKPTR(optlen)); } - return ret; + + return do_tcp_setsockopt(sk, SOL_TCP, optname, + KERNEL_SOCKPTR(optval), *optlen); } -static int _bpf_setsockopt(struct sock *sk, int level, int optname, - char *optval, int optlen) +static int sol_ip_sockopt(struct sock *sk, int optname, + char *optval, int *optlen, + bool getopt) { - if (sk_fullsock(sk)) - sock_owned_by_me(sk); - return __bpf_setsockopt(sk, level, optname, optval, optlen); + if (sk->sk_family != AF_INET) + return -EINVAL; + + switch (optname) { + case IP_TOS: + if (*optlen != sizeof(int)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + if (getopt) + return do_ip_getsockopt(sk, SOL_IP, optname, + KERNEL_SOCKPTR(optval), + 
KERNEL_SOCKPTR(optlen)); + + return do_ip_setsockopt(sk, SOL_IP, optname, + KERNEL_SOCKPTR(optval), *optlen); } -static int __bpf_getsockopt(struct sock *sk, int level, int optname, - char *optval, int optlen) +static int sol_ipv6_sockopt(struct sock *sk, int optname, + char *optval, int *optlen, + bool getopt) { - if (!sk_fullsock(sk)) - goto err_clear; + if (sk->sk_family != AF_INET6) + return -EINVAL; - if (level == SOL_SOCKET) { - if (optlen != sizeof(int)) - goto err_clear; + switch (optname) { + case IPV6_TCLASS: + case IPV6_AUTOFLOWLABEL: + if (*optlen != sizeof(int)) + return -EINVAL; + break; + default: + return -EINVAL; + } - switch (optname) { - case SO_RCVBUF: - *((int *)optval) = sk->sk_rcvbuf; - break; - case SO_SNDBUF: - *((int *)optval) = sk->sk_sndbuf; - break; - case SO_MARK: - *((int *)optval) = sk->sk_mark; - break; - case SO_PRIORITY: - *((int *)optval) = sk->sk_priority; - break; - case SO_BINDTOIFINDEX: - *((int *)optval) = sk->sk_bound_dev_if; - break; - case SO_REUSEPORT: - *((int *)optval) = sk->sk_reuseport; - break; - case SO_TXREHASH: - *((int *)optval) = sk->sk_txrehash; - break; - default: - goto err_clear; - } -#ifdef CONFIG_INET - } else if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) { - struct inet_connection_sock *icsk; - struct tcp_sock *tp; + if (getopt) + return ipv6_bpf_stub->ipv6_getsockopt(sk, SOL_IPV6, optname, + KERNEL_SOCKPTR(optval), + KERNEL_SOCKPTR(optlen)); - switch (optname) { - case TCP_CONGESTION: - icsk = inet_csk(sk); + return ipv6_bpf_stub->ipv6_setsockopt(sk, SOL_IPV6, optname, + KERNEL_SOCKPTR(optval), *optlen); +} - if (!icsk->icsk_ca_ops || optlen <= 1) - goto err_clear; - strncpy(optval, icsk->icsk_ca_ops->name, optlen); - optval[optlen - 1] = 0; - break; - case TCP_SAVED_SYN: - tp = tcp_sk(sk); +static int __bpf_setsockopt(struct sock *sk, int level, int optname, + char *optval, int optlen) +{ + if (!sk_fullsock(sk)) + return -EINVAL; - if (optlen <= 0 || !tp->saved_syn || - optlen > tcp_saved_syn_len(tp->saved_syn)) - goto err_clear; - memcpy(optval, tp->saved_syn->data, optlen); - break; - default: - goto err_clear; - } - } else if (level == SOL_IP) { - struct inet_sock *inet = inet_sk(sk); + if (level == SOL_SOCKET) + return sol_socket_sockopt(sk, optname, optval, &optlen, false); + else if (IS_ENABLED(CONFIG_INET) && level == SOL_IP) + return sol_ip_sockopt(sk, optname, optval, &optlen, false); + else if (IS_ENABLED(CONFIG_IPV6) && level == SOL_IPV6) + return sol_ipv6_sockopt(sk, optname, optval, &optlen, false); + else if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP) + return sol_tcp_sockopt(sk, optname, optval, &optlen, false); - if (optlen != sizeof(int) || sk->sk_family != AF_INET) - goto err_clear; + return -EINVAL; +} - /* Only some options are supported */ - switch (optname) { - case IP_TOS: - *((int *)optval) = (int)inet->tos; - break; - default: - goto err_clear; - } -#if IS_ENABLED(CONFIG_IPV6) - } else if (level == SOL_IPV6) { - struct ipv6_pinfo *np = inet6_sk(sk); +static int _bpf_setsockopt(struct sock *sk, int level, int optname, + char *optval, int optlen) +{ + if (sk_fullsock(sk)) + sock_owned_by_me(sk); + return __bpf_setsockopt(sk, level, optname, optval, optlen); +} - if (optlen != sizeof(int) || sk->sk_family != AF_INET6) - goto err_clear; +static int __bpf_getsockopt(struct sock *sk, int level, int optname, + char *optval, int optlen) +{ + int err, saved_optlen = optlen; - /* Only some options are supported */ - switch (optname) { - case IPV6_TCLASS: - *((int *)optval) = 
(int)np->tclass; - break; - default: - goto err_clear; - } -#endif -#endif - } else { - goto err_clear; + if (!sk_fullsock(sk)) { + err = -EINVAL; + goto done; } - return 0; -err_clear: - memset(optval, 0, optlen); - return -EINVAL; + + if (level == SOL_SOCKET) + err = sol_socket_sockopt(sk, optname, optval, &optlen, true); + else if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP) + err = sol_tcp_sockopt(sk, optname, optval, &optlen, true); + else if (IS_ENABLED(CONFIG_INET) && level == SOL_IP) + err = sol_ip_sockopt(sk, optname, optval, &optlen, true); + else if (IS_ENABLED(CONFIG_IPV6) && level == SOL_IPV6) + err = sol_ipv6_sockopt(sk, optname, optval, &optlen, true); + else + err = -EINVAL; + +done: + if (err) + optlen = 0; + if (optlen < saved_optlen) + memset(optval + optlen, 0, saved_optlen - optlen); + return err; } static int _bpf_getsockopt(struct sock *sk, int level, int optname, @@ -7666,34 +7571,23 @@ const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak; static const struct bpf_func_proto * sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { + const struct bpf_func_proto *func_proto; + + func_proto = cgroup_common_func_proto(func_id, prog); + if (func_proto) + return func_proto; + + func_proto = cgroup_current_func_proto(func_id, prog); + if (func_proto) + return func_proto; + switch (func_id) { - /* inet and inet6 sockets are created in a process - * context so there is always a valid uid/gid - */ - case BPF_FUNC_get_current_uid_gid: - return &bpf_get_current_uid_gid_proto; - case BPF_FUNC_get_local_storage: - return &bpf_get_local_storage_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_sock_proto; case BPF_FUNC_get_netns_cookie: return &bpf_get_netns_cookie_sock_proto; case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; - case BPF_FUNC_get_current_pid_tgid: - return &bpf_get_current_pid_tgid_proto; - case BPF_FUNC_get_current_comm: - return &bpf_get_current_comm_proto; -#ifdef CONFIG_CGROUPS - case BPF_FUNC_get_current_cgroup_id: - return &bpf_get_current_cgroup_id_proto; - case BPF_FUNC_get_current_ancestor_cgroup_id: - return &bpf_get_current_ancestor_cgroup_id_proto; -#endif -#ifdef CONFIG_CGROUP_NET_CLASSID - case BPF_FUNC_get_cgroup_classid: - return &bpf_get_cgroup_classid_curr_proto; -#endif case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_cg_sock_proto; case BPF_FUNC_ktime_get_coarse_ns: @@ -7706,12 +7600,17 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) static const struct bpf_func_proto * sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { + const struct bpf_func_proto *func_proto; + + func_proto = cgroup_common_func_proto(func_id, prog); + if (func_proto) + return func_proto; + + func_proto = cgroup_current_func_proto(func_id, prog); + if (func_proto) + return func_proto; + switch (func_id) { - /* inet and inet6 sockets are created in a process - * context so there is always a valid uid/gid - */ - case BPF_FUNC_get_current_uid_gid: - return &bpf_get_current_uid_gid_proto; case BPF_FUNC_bind: switch (prog->expected_attach_type) { case BPF_CGROUP_INET4_CONNECT: @@ -7724,24 +7623,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_socket_cookie_sock_addr_proto; case BPF_FUNC_get_netns_cookie: return &bpf_get_netns_cookie_sock_addr_proto; - case BPF_FUNC_get_local_storage: - return &bpf_get_local_storage_proto; case BPF_FUNC_perf_event_output: return 
&bpf_event_output_data_proto; - case BPF_FUNC_get_current_pid_tgid: - return &bpf_get_current_pid_tgid_proto; - case BPF_FUNC_get_current_comm: - return &bpf_get_current_comm_proto; -#ifdef CONFIG_CGROUPS - case BPF_FUNC_get_current_cgroup_id: - return &bpf_get_current_cgroup_id_proto; - case BPF_FUNC_get_current_ancestor_cgroup_id: - return &bpf_get_current_ancestor_cgroup_id_proto; -#endif -#ifdef CONFIG_CGROUP_NET_CLASSID - case BPF_FUNC_get_cgroup_classid: - return &bpf_get_cgroup_classid_curr_proto; -#endif #ifdef CONFIG_INET case BPF_FUNC_sk_lookup_tcp: return &bpf_sock_addr_sk_lookup_tcp_proto; @@ -7822,9 +7705,13 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto __weak; static const struct bpf_func_proto * cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { + const struct bpf_func_proto *func_proto; + + func_proto = cgroup_common_func_proto(func_id, prog); + if (func_proto) + return func_proto; + switch (func_id) { - case BPF_FUNC_get_local_storage: - return &bpf_get_local_storage_proto; case BPF_FUNC_sk_fullsock: return &bpf_sk_fullsock_proto; case BPF_FUNC_sk_storage_get: @@ -8064,6 +7951,12 @@ const struct bpf_func_proto bpf_sock_hash_update_proto __weak; static const struct bpf_func_proto * sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { + const struct bpf_func_proto *func_proto; + + func_proto = cgroup_common_func_proto(func_id, prog); + if (func_proto) + return func_proto; + switch (func_id) { case BPF_FUNC_setsockopt: return &bpf_sock_ops_setsockopt_proto; @@ -8077,8 +7970,6 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_sock_hash_update_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_sock_ops_proto; - case BPF_FUNC_get_local_storage: - return &bpf_get_local_storage_proto; case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; case BPF_FUNC_sk_storage_get: @@ -10811,14 +10702,13 @@ int sk_detach_filter(struct sock *sk) } EXPORT_SYMBOL_GPL(sk_detach_filter); -int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, - unsigned int len) +int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len) { struct sock_fprog_kern *fprog; struct sk_filter *filter; int ret = 0; - lock_sock(sk); + sockopt_lock_sock(sk); filter = rcu_dereference_protected(sk->sk_filter, lockdep_sock_is_held(sk)); if (!filter) @@ -10843,7 +10733,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, goto out; ret = -EFAULT; - if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog))) + if (copy_to_sockptr(optval, fprog->filter, bpf_classic_proglen(fprog))) goto out; /* Instead of bytes, the API requests to return the number @@ -10851,7 +10741,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, */ ret = fprog->len; out: - release_sock(sk); + sockopt_release_sock(sk); return ret; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 764c4cb3fe8f..990429c69ccd 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -866,8 +866,8 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, } } -bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, - __be16 proto, int nhoff, int hlen, unsigned int flags) +u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, + __be16 proto, int nhoff, int hlen, unsigned int flags) { struct bpf_flow_keys *flow_keys = ctx->flow_keys; u32 result; @@ -892,7 +892,7 @@ bool 
bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, flow_keys->thoff = clamp_t(u16, flow_keys->thoff, flow_keys->nhoff, hlen); - return result == BPF_OK; + return result; } static bool is_pppoe_ses_hdr_valid(const struct pppoe_hdr *hdr) @@ -1008,6 +1008,7 @@ bool __skb_flow_dissect(const struct net *net, }; __be16 n_proto = proto; struct bpf_prog *prog; + u32 result; if (skb) { ctx.skb = skb; @@ -1019,13 +1020,16 @@ bool __skb_flow_dissect(const struct net *net, } prog = READ_ONCE(run_array->items[0].prog); - ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, - hlen, flags); + result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, + hlen, flags); + if (result == BPF_FLOW_DISSECTOR_CONTINUE) + goto dissect_continue; __skb_flow_bpf_to_target(&flow_keys, flow_dissector, target_container); rcu_read_unlock(); - return ret; + return result == BPF_OK; } +dissect_continue: rcu_read_unlock(); } diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index a10335b4ba2d..c8d137ef5980 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -345,7 +345,7 @@ static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats, for_each_possible_cpu(i) { const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); - qstats->qlen += qcpu->backlog; + qstats->qlen += qcpu->qlen; qstats->backlog += qcpu->backlog; qstats->drops += qcpu->drops; qstats->requeues += qcpu->requeues; diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c index 541c7a72a28a..21619c70a82b 100644 --- a/net/core/gro_cells.c +++ b/net/core/gro_cells.c @@ -26,7 +26,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) cell = this_cpu_ptr(gcells->cells); - if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) { + if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) { drop: dev_core_stats_rx_dropped_inc(dev); kfree_skb(skb); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 6a8c2596ebab..e93edb810103 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -307,11 +307,35 @@ static int neigh_del_timer(struct neighbour *n) return 0; } -static void pneigh_queue_purge(struct sk_buff_head *list) +static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net) { + struct sk_buff_head tmp; + unsigned long flags; struct sk_buff *skb; - while ((skb = skb_dequeue(list)) != NULL) { + skb_queue_head_init(&tmp); + spin_lock_irqsave(&list->lock, flags); + skb = skb_peek(list); + while (skb != NULL) { + struct sk_buff *skb_next = skb_peek_next(skb, list); + struct net_device *dev = skb->dev; + + if (net == NULL || net_eq(dev_net(dev), net)) { + struct in_device *in_dev; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(dev); + if (in_dev) + in_dev->arp_parms->qlen--; + rcu_read_unlock(); + __skb_unlink(skb, list); + __skb_queue_tail(&tmp, skb); + } + skb = skb_next; + } + spin_unlock_irqrestore(&list->lock, flags); + + while ((skb = __skb_dequeue(&tmp))) { dev_put(skb->dev); kfree_skb(skb); } @@ -385,9 +409,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, write_lock_bh(&tbl->lock); neigh_flush_dev(tbl, dev, skip_perm); pneigh_ifdown_and_unlock(tbl, dev); - - del_timer_sync(&tbl->proxy_timer); - pneigh_queue_purge(&tbl->proxy_queue); + pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev)); + if (skb_queue_empty_lockless(&tbl->proxy_queue)) + del_timer_sync(&tbl->proxy_timer); return 0; } @@ -1597,8 +1621,15 @@ static void neigh_proxy_process(struct timer_list *t) if (tdif <= 0) { struct net_device *dev = skb->dev; + struct 
in_device *in_dev; + rcu_read_lock(); + in_dev = __in_dev_get_rcu(dev); + if (in_dev) + in_dev->arp_parms->qlen--; + rcu_read_unlock(); __skb_unlink(skb, &tbl->proxy_queue); + if (tbl->proxy_redo && netif_running(dev)) { rcu_read_lock(); tbl->proxy_redo(skb); @@ -1623,7 +1654,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, unsigned long sched_next = jiffies + prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY)); - if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) { + if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) { kfree_skb(skb); return; } @@ -1639,6 +1670,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, skb_dst_drop(skb); dev_hold(skb->dev); __skb_queue_tail(&tbl->proxy_queue, skb); + p->qlen++; mod_timer(&tbl->proxy_timer, sched_next); spin_unlock(&tbl->proxy_queue.lock); } @@ -1671,6 +1703,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, refcount_set(&p->refcnt, 1); p->reachable_time = neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); + p->qlen = 0; netdev_hold(dev, &p->dev_tracker, GFP_KERNEL); p->dev = dev; write_pnet(&p->net, net); @@ -1736,6 +1769,7 @@ void neigh_table_init(int index, struct neigh_table *tbl) refcount_set(&tbl->parms.refcnt, 1); tbl->parms.reachable_time = neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME)); + tbl->parms.qlen = 0; tbl->stats = alloc_percpu(struct neigh_statistics); if (!tbl->stats) @@ -1787,7 +1821,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl) cancel_delayed_work_sync(&tbl->managed_work); cancel_delayed_work_sync(&tbl->gc_work); del_timer_sync(&tbl->proxy_timer); - pneigh_queue_purge(&tbl->proxy_queue); + pneigh_queue_purge(&tbl->proxy_queue, NULL); neigh_ifdown(tbl, NULL); if (atomic_read(&tbl->entries)) pr_crit("neighbour leakage\n"); @@ -1819,9 +1853,6 @@ static struct neigh_table *neigh_find_table(int family) case AF_INET6: tbl = neigh_tables[NEIGH_ND_TABLE]; break; - case AF_DECnet: - tbl = neigh_tables[NEIGH_DN_TABLE]; - break; } return tbl; diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 1a6a86693b74..d6a70aeaa503 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -66,7 +66,7 @@ struct update_classid_context { #define UPDATE_CLASSID_BATCH 1000 -static int update_classid_sock(const void *v, struct file *file, unsigned n) +static int update_classid_sock(const void *v, struct file *file, unsigned int n) { struct update_classid_context *ctx = (void *)v; struct socket *sock = sock_from_file(file); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 5d27067b72d5..9be762e1d042 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -556,7 +556,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) if ((delim = strchr(cur, ',')) == NULL) goto parse_failed; *delim = 0; - strlcpy(np->dev_name, cur, sizeof(np->dev_name)); + strscpy(np->dev_name, cur, sizeof(np->dev_name)); cur = delim; } cur++; @@ -610,7 +610,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) int err; np->dev = ndev; - strlcpy(np->dev_name, ndev->name, IFNAMSIZ); + strscpy(np->dev_name, ndev->name, IFNAMSIZ); if (ndev->priv_flags & IFF_DISABLE_NETPOLL) { np_err(np, "%s doesn't support polling, aborting\n", diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ac45328607f7..8de9fb32db08 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -866,14 +866,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition) break; case IF_OPER_TESTING: - if (operstate == 
IF_OPER_UP || - operstate == IF_OPER_UNKNOWN) + if (netif_oper_up(dev)) operstate = IF_OPER_TESTING; break; case IF_OPER_DORMANT: - if (operstate == IF_OPER_UP || - operstate == IF_OPER_UNKNOWN) + if (netif_oper_up(dev)) operstate = IF_OPER_DORMANT; break; } @@ -1059,6 +1057,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_MASTER */ + nla_total_size(1) /* IFLA_CARRIER */ + nla_total_size(4) /* IFLA_PROMISCUITY */ + + nla_total_size(4) /* IFLA_ALLMULTI */ + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */ + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */ + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */ @@ -1767,6 +1766,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) || nla_put_u32(skb, IFLA_GROUP, dev->group) || nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) || + nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) || nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) || nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) || @@ -1928,6 +1928,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 }, [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT }, [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT }, + [IFLA_ALLMULTI] = { .type = NLA_REJECT }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { @@ -6070,6 +6071,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) && !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) { NL_SET_ERR_MSG(extack, "Bulk delete is not supported"); + module_put(owner); goto err_unlock; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 974bbbbe7138..f1b8b20fc20b 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -91,7 +91,11 @@ static struct kmem_cache *skbuff_ext_cache __ro_after_init; int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; EXPORT_SYMBOL(sysctl_max_skb_frags); -/* The array 'drop_reasons' is auto-generated in dropreason_str.c */ +#undef FN +#define FN(reason) [SKB_DROP_REASON_##reason] = #reason, +const char * const drop_reasons[] = { + DEFINE_DROP_REASON(FN, FN) +}; EXPORT_SYMBOL(drop_reasons); /** @@ -777,9 +781,10 @@ EXPORT_SYMBOL(__kfree_skb); * hit zero. Meanwhile, pass the drop reason to 'kfree_skb' * tracepoint. 
*/ -void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) +void __fix_address +kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) { - if (!skb_unref(skb)) + if (unlikely(!skb_unref(skb))) return; DEBUG_NET_WARN_ON_ONCE(reason <= 0 || reason >= SKB_DROP_REASON_MAX); @@ -4205,9 +4210,8 @@ normal: SKB_GSO_CB(nskb)->csum_start = skb_headroom(nskb) + doffset; } else { - skb_copy_bits(head_skb, offset, - skb_put(nskb, len), - len); + if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) + goto err; } continue; } @@ -4798,7 +4802,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) { bool ret; - if (likely(sysctl_tstamp_allow_data || tsonly)) + if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) return true; read_lock_bh(&sk->sk_callback_lock); diff --git a/net/core/skmsg.c b/net/core/skmsg.c index f47338d89d5d..188f8558d27d 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -461,7 +461,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, if (copied == len) break; - } while (!sg_is_last(sge)); + } while ((i != msg_rx->sg.end) && !sg_is_last(sge)); if (unlikely(peek)) { msg_rx = sk_psock_next_msg(psock, msg_rx); @@ -471,7 +471,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, } msg_rx->sg.start = i; - if (!sge->length && sg_is_last(sge)) { + if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) { msg_rx = sk_psock_dequeue_msg(psock); kfree_sk_msg(msg_rx); } @@ -1194,8 +1194,9 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb) ret = bpf_prog_run_pin_on_cpu(prog, skb); ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb)); } - if (sk_psock_verdict_apply(psock, skb, ret) < 0) - len = 0; + ret = sk_psock_verdict_apply(psock, skb, ret); + if (ret < 0) + len = ret; out: rcu_read_unlock(); return len; diff --git a/net/core/sock.c b/net/core/sock.c index 4cb957d934a2..eeb6cbac6f49 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -703,15 +703,17 @@ static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen) goto out; } - return sock_bindtoindex(sk, index, true); + sockopt_lock_sock(sk); + ret = sock_bindtoindex_locked(sk, index); + sockopt_release_sock(sk); out: #endif return ret; } -static int sock_getbindtodevice(struct sock *sk, char __user *optval, - int __user *optlen, int len) +static int sock_getbindtodevice(struct sock *sk, sockptr_t optval, + sockptr_t optlen, int len) { int ret = -ENOPROTOOPT; #ifdef CONFIG_NETDEVICES @@ -735,12 +737,12 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval, len = strlen(devname) + 1; ret = -EFAULT; - if (copy_to_user(optval, devname, len)) + if (copy_to_sockptr(optval, devname, len)) goto out; zero: ret = -EFAULT; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) goto out; ret = 0; @@ -1036,17 +1038,51 @@ static int sock_reserve_memory(struct sock *sk, int bytes) return 0; } +void sockopt_lock_sock(struct sock *sk) +{ + /* When current->bpf_ctx is set, the setsockopt is called from + * a bpf prog. bpf has ensured the sk lock has been + * acquired before calling setsockopt(). 
+ */ + if (has_current_bpf_ctx()) + return; + + lock_sock(sk); +} +EXPORT_SYMBOL(sockopt_lock_sock); + +void sockopt_release_sock(struct sock *sk) +{ + if (has_current_bpf_ctx()) + return; + + release_sock(sk); +} +EXPORT_SYMBOL(sockopt_release_sock); + +bool sockopt_ns_capable(struct user_namespace *ns, int cap) +{ + return has_current_bpf_ctx() || ns_capable(ns, cap); +} +EXPORT_SYMBOL(sockopt_ns_capable); + +bool sockopt_capable(int cap) +{ + return has_current_bpf_ctx() || capable(cap); +} +EXPORT_SYMBOL(sockopt_capable); + /* * This is meant for all protocols to use and covers goings on * at the socket level. Everything here is generic. */ -int sock_setsockopt(struct socket *sock, int level, int optname, - sockptr_t optval, unsigned int optlen) +int sk_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct so_timestamping timestamping; + struct socket *sock = sk->sk_socket; struct sock_txtime sk_txtime; - struct sock *sk = sock->sk; int val; int valbool; struct linger ling; @@ -1067,11 +1103,11 @@ int sock_setsockopt(struct socket *sock, int level, int optname, valbool = val ? 1 : 0; - lock_sock(sk); + sockopt_lock_sock(sk); switch (optname) { case SO_DEBUG: - if (val && !capable(CAP_NET_ADMIN)) + if (val && !sockopt_capable(CAP_NET_ADMIN)) ret = -EACCES; else sock_valbool_flag(sk, SOCK_DBG, valbool); @@ -1101,7 +1137,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, * play 'guess the biggest size' games. RCVBUF/SNDBUF * are treated in BSD as hints */ - val = min_t(u32, val, sysctl_wmem_max); + val = min_t(u32, val, READ_ONCE(sysctl_wmem_max)); set_sndbuf: /* Ensure val * 2 fits into an int, to prevent max_t() * from treating it as a negative value. @@ -1115,7 +1151,7 @@ set_sndbuf: break; case SO_SNDBUFFORCE: - if (!capable(CAP_NET_ADMIN)) { + if (!sockopt_capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } @@ -1133,11 +1169,11 @@ set_sndbuf: * play 'guess the biggest size' games. RCVBUF/SNDBUF * are treated in BSD as hints */ - __sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max)); + __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max))); break; case SO_RCVBUFFORCE: - if (!capable(CAP_NET_ADMIN)) { + if (!sockopt_capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } @@ -1164,8 +1200,8 @@ set_sndbuf: case SO_PRIORITY: if ((val >= 0 && val <= 6) || - ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) || - ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) + sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) || + sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) sk->sk_priority = val; else ret = -EPERM; @@ -1228,7 +1264,7 @@ set_sndbuf: case SO_RCVLOWAT: if (val < 0) val = INT_MAX; - if (sock->ops->set_rcvlowat) + if (sock && sock->ops->set_rcvlowat) ret = sock->ops->set_rcvlowat(sk, val); else WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); @@ -1310,8 +1346,8 @@ set_sndbuf: clear_bit(SOCK_PASSSEC, &sock->flags); break; case SO_MARK: - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && - !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { + if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && + !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { ret = -EPERM; break; } @@ -1319,8 +1355,8 @@ set_sndbuf: __sock_set_mark(sk, val); break; case SO_RCVMARK: - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && - !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { + if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && + !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { ret = -EPERM; break; } @@ -1354,7 +1390,7 @@ set_sndbuf: #ifdef CONFIG_NET_RX_BUSY_POLL case SO_BUSY_POLL: /* allow unprivileged users to decrease the value */ - if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) + if ((val > sk->sk_ll_usec) && !sockopt_capable(CAP_NET_ADMIN)) ret = -EPERM; else { if (val < 0) @@ -1364,13 +1400,13 @@ set_sndbuf: } break; case SO_PREFER_BUSY_POLL: - if (valbool && !capable(CAP_NET_ADMIN)) + if (valbool && !sockopt_capable(CAP_NET_ADMIN)) ret = -EPERM; else WRITE_ONCE(sk->sk_prefer_busy_poll, valbool); break; case SO_BUSY_POLL_BUDGET: - if (val > READ_ONCE(sk->sk_busy_poll_budget) && !capable(CAP_NET_ADMIN)) { + if (val > READ_ONCE(sk->sk_busy_poll_budget) && !sockopt_capable(CAP_NET_ADMIN)) { ret = -EPERM; } else { if (val < 0 || val > U16_MAX) @@ -1441,7 +1477,7 @@ set_sndbuf: * scheduler has enough safe guards. */ if (sk_txtime.clockid != CLOCK_MONOTONIC && - !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { + !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { ret = -EPERM; break; } @@ -1496,9 +1532,16 @@ set_sndbuf: ret = -ENOPROTOOPT; break; } - release_sock(sk); + sockopt_release_sock(sk); return ret; } + +int sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) +{ + return sk_setsockopt(sock->sk, level, optname, + optval, optlen); +} EXPORT_SYMBOL(sock_setsockopt); static const struct cred *sk_get_peer_cred(struct sock *sk) @@ -1525,22 +1568,25 @@ static void cred_to_ucred(struct pid *pid, const struct cred *cred, } } -static int groups_to_user(gid_t __user *dst, const struct group_info *src) +static int groups_to_user(sockptr_t dst, const struct group_info *src) { struct user_namespace *user_ns = current_user_ns(); int i; - for (i = 0; i < src->ngroups; i++) - if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i)) + for (i = 0; i < src->ngroups; i++) { + gid_t gid = from_kgid_munged(user_ns, src->gid[i]); + + if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid))) return -EFAULT; + } return 0; } -int sock_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) +int sk_getsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, sockptr_t optlen) { - struct sock *sk = sock->sk; + struct socket *sock = sk->sk_socket; union { int val; @@ -1557,7 +1603,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, int lv = sizeof(int); int len; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len < 0) return -EINVAL; @@ -1692,7 +1738,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); spin_unlock(&sk->sk_peer_lock); - if (copy_to_user(optval, &peercred, len)) + if (copy_to_sockptr(optval, &peercred, len)) 
return -EFAULT; goto lenout; } @@ -1710,11 +1756,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname, if (len < n * sizeof(gid_t)) { len = n * sizeof(gid_t); put_cred(cred); - return put_user(len, optlen) ? -EFAULT : -ERANGE; + return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE; } len = n * sizeof(gid_t); - ret = groups_to_user((gid_t __user *)optval, cred->group_info); + ret = groups_to_user(optval, cred->group_info); put_cred(cred); if (ret) return ret; @@ -1730,7 +1776,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, return -ENOTCONN; if (lv < len) return -EINVAL; - if (copy_to_user(optval, address, len)) + if (copy_to_sockptr(optval, address, len)) return -EFAULT; goto lenout; } @@ -1747,7 +1793,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_PEERSEC: - return security_socket_getpeersec_stream(sock, optval, optlen, len); + return security_socket_getpeersec_stream(sock, optval.user, optlen.user, len); case SO_MARK: v.val = sk->sk_mark; @@ -1779,7 +1825,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, return sock_getbindtodevice(sk, optval, optlen, len); case SO_GET_FILTER: - len = sk_get_filter(sk, (struct sock_filter __user *)optval, len); + len = sk_get_filter(sk, optval, len); if (len < 0) return len; @@ -1827,7 +1873,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, sk_get_meminfo(sk, meminfo); len = min_t(unsigned int, len, sizeof(meminfo)); - if (copy_to_user(optval, &meminfo, len)) + if (copy_to_sockptr(optval, &meminfo, len)) return -EFAULT; goto lenout; @@ -1896,14 +1942,22 @@ int sock_getsockopt(struct socket *sock, int level, int optname, if (len > lv) len = lv; - if (copy_to_user(optval, &v, len)) + if (copy_to_sockptr(optval, &v, len)) return -EFAULT; lenout: - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; return 0; } +int sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + return sk_getsockopt(sock->sk, level, optname, + USER_SOCKPTR(optval), + USER_SOCKPTR(optlen)); +} + /* * Initialize an sk_lock. * @@ -2536,7 +2590,7 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */ if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > - sysctl_optmem_max) + READ_ONCE(sysctl_optmem_max)) return NULL; skb = alloc_skb(size, priority); @@ -2554,8 +2608,10 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, */ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) { - if ((unsigned int)size <= sysctl_optmem_max && - atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { + int optmem_max = READ_ONCE(sysctl_optmem_max); + + if ((unsigned int)size <= optmem_max && + atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { void *mem; /* First do the add, to avoid the race if kmalloc * might sleep. 
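Aside (illustrative, not part of the applied diff): the net/core/sock.c hunks above split the socket-level option paths into sk_setsockopt()/sk_getsockopt() operating on sockptr_t, and add sockopt_lock_sock()/sockopt_release_sock() plus sockopt_capable()/sockopt_ns_capable(), which skip lock_sock() and treat the capability checks as satisfied when current->bpf_ctx is set, since the BPF setsockopt helpers already hold the socket lock. A minimal sketch of an in-kernel caller under those assumptions; example_set_priority() is a hypothetical helper, not something added by this diff:

/* Sketch only -- example_set_priority() is hypothetical. */
#include <linux/sockptr.h>
#include <net/sock.h>

static int example_set_priority(struct sock *sk, int prio)
{
	/* sk_setsockopt() serializes via sockopt_lock_sock(); on the BPF
	 * helper path (current->bpf_ctx set) the lock is assumed to be
	 * held already and lock_sock() is skipped, per the hunks above.
	 */
	return sk_setsockopt(sk, SOL_SOCKET, SO_PRIORITY,
			     KERNEL_SOCKPTR(&prio), sizeof(prio));
}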
@@ -3309,8 +3365,8 @@ void sock_init_data(struct socket *sock, struct sock *sk) timer_setup(&sk->sk_timer, NULL, 0); sk->sk_allocation = GFP_KERNEL; - sk->sk_rcvbuf = sysctl_rmem_default; - sk->sk_sndbuf = sysctl_wmem_default; + sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); + sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); sk->sk_state = TCP_CLOSE; sk_set_socket(sk, sock); @@ -3365,7 +3421,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) #ifdef CONFIG_NET_RX_BUSY_POLL sk->sk_napi_id = 0; - sk->sk_ll_usec = sysctl_net_busy_read; + sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read); #endif sk->sk_max_pacing_rate = ~0UL; diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 9a9fb9487d63..a660baedd9e7 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -41,7 +41,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); - stab = kzalloc(sizeof(*stab), GFP_USER | __GFP_ACCOUNT); + stab = bpf_map_area_alloc(sizeof(*stab), NUMA_NO_NODE); if (!stab) return ERR_PTR(-ENOMEM); @@ -52,7 +52,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) sizeof(struct sock *), stab->map.numa_node); if (!stab->sks) { - kfree(stab); + bpf_map_area_free(stab); return ERR_PTR(-ENOMEM); } @@ -361,7 +361,7 @@ static void sock_map_free(struct bpf_map *map) synchronize_rcu(); bpf_map_area_free(stab->sks); - kfree(stab); + bpf_map_area_free(stab); } static void sock_map_release_progs(struct bpf_map *map) @@ -1085,7 +1085,7 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) if (attr->key_size > MAX_BPF_STACK) return ERR_PTR(-E2BIG); - htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT); + htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE); if (!htab) return ERR_PTR(-ENOMEM); @@ -1115,7 +1115,7 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) return &htab->map; free_htab: - kfree(htab); + bpf_map_area_free(htab); return ERR_PTR(err); } @@ -1168,7 +1168,7 @@ static void sock_hash_free(struct bpf_map *map) synchronize_rcu(); bpf_map_area_free(htab->buckets); - kfree(htab); + bpf_map_area_free(htab); } static void *sock_hash_lookup_sys(struct bpf_map *map, void *key) diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 71a13596ea2b..5b1ce656baa1 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -29,7 +29,6 @@ static int int_3600 = 3600; static int min_sndbuf = SOCK_MIN_SNDBUF; static int min_rcvbuf = SOCK_MIN_RCVBUF; static int max_skb_frags = MAX_SKB_FRAGS; -static long long_max __maybe_unused = LONG_MAX; static int net_msg_warn; /* Unused, but still a sysctl */ @@ -234,14 +233,17 @@ static int set_default_qdisc(struct ctl_table *table, int write, static int proc_do_dev_weight(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { - int ret; + static DEFINE_MUTEX(dev_weight_mutex); + int ret, weight; + mutex_lock(&dev_weight_mutex); ret = proc_dointvec(table, write, buffer, lenp, ppos); - if (ret != 0) - return ret; - - dev_rx_weight = weight_p * dev_weight_rx_bias; - dev_tx_weight = weight_p * dev_weight_tx_bias; + if (!ret && write) { + weight = READ_ONCE(weight_p); + WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias); + WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias); + } + mutex_unlock(&dev_weight_mutex); return ret; } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index da6e3b20cd75..6a6e121dc00c 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -45,10 +45,11 @@ static unsigned int 
dccp_v4_pernet_id __read_mostly; int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; + struct inet_bind_hashbucket *prev_addr_hashbucket = NULL; + __be32 daddr, nexthop, prev_sk_rcv_saddr; struct inet_sock *inet = inet_sk(sk); struct dccp_sock *dp = dccp_sk(sk); __be16 orig_sport, orig_dport; - __be32 daddr, nexthop; struct flowi4 *fl4; struct rtable *rt; int err; @@ -89,9 +90,29 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (inet_opt == NULL || !inet_opt->opt.srr) daddr = fl4->daddr; - if (inet->inet_saddr == 0) + if (inet->inet_saddr == 0) { + if (inet_csk(sk)->icsk_bind2_hash) { + prev_addr_hashbucket = + inet_bhashfn_portaddr(&dccp_hashinfo, sk, + sock_net(sk), + inet->inet_num); + prev_sk_rcv_saddr = sk->sk_rcv_saddr; + } inet->inet_saddr = fl4->saddr; + } + sk_rcv_saddr_set(sk, inet->inet_saddr); + + if (prev_addr_hashbucket) { + err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk); + if (err) { + inet->inet_saddr = 0; + sk_rcv_saddr_set(sk, prev_sk_rcv_saddr); + ip_rt_put(rt); + return err; + } + } + inet->inet_dport = usin->sin_port; sk_daddr_set(sk, daddr); diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index fd44638ec16b..e57b43006074 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -934,8 +934,26 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, } if (saddr == NULL) { + struct inet_bind_hashbucket *prev_addr_hashbucket = NULL; + struct in6_addr prev_v6_rcv_saddr; + + if (icsk->icsk_bind2_hash) { + prev_addr_hashbucket = inet_bhashfn_portaddr(&dccp_hashinfo, + sk, sock_net(sk), + inet->inet_num); + prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr; + } + saddr = &fl6.saddr; sk->sk_v6_rcv_saddr = *saddr; + + if (prev_addr_hashbucket) { + err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk); + if (err) { + sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr; + goto failure; + } + } } /* set the source address */ diff --git a/net/dccp/proto.c b/net/dccp/proto.c index e13641c65f88..7cd4a6cc99fc 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -1120,6 +1120,12 @@ static int __init dccp_init(void) SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); if (!dccp_hashinfo.bind_bucket_cachep) goto out_free_hashinfo2; + dccp_hashinfo.bind2_bucket_cachep = + kmem_cache_create("dccp_bind2_bucket", + sizeof(struct inet_bind2_bucket), 0, + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); + if (!dccp_hashinfo.bind2_bucket_cachep) + goto out_free_bind_bucket_cachep; /* * Size and allocate the main established and bind bucket @@ -1150,7 +1156,7 @@ static int __init dccp_init(void) if (!dccp_hashinfo.ehash) { DCCP_CRIT("Failed to allocate DCCP established hash table"); - goto out_free_bind_bucket_cachep; + goto out_free_bind2_bucket_cachep; } for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) @@ -1176,14 +1182,24 @@ static int __init dccp_init(void) goto out_free_dccp_locks; } + dccp_hashinfo.bhash2 = (struct inet_bind_hashbucket *) + __get_free_pages(GFP_ATOMIC | __GFP_NOWARN, bhash_order); + + if (!dccp_hashinfo.bhash2) { + DCCP_CRIT("Failed to allocate DCCP bind2 hash table"); + goto out_free_dccp_bhash; + } + for (i = 0; i < dccp_hashinfo.bhash_size; i++) { spin_lock_init(&dccp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain); + spin_lock_init(&dccp_hashinfo.bhash2[i].lock); + INIT_HLIST_HEAD(&dccp_hashinfo.bhash2[i].chain); } rc = dccp_mib_init(); if (rc) - goto out_free_dccp_bhash; + goto out_free_dccp_bhash2; rc = dccp_ackvec_init(); if (rc) @@ 
-1207,30 +1223,38 @@ out_ackvec_exit: dccp_ackvec_exit(); out_free_dccp_mib: dccp_mib_exit(); +out_free_dccp_bhash2: + free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order); out_free_dccp_bhash: free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); out_free_dccp_locks: inet_ehash_locks_free(&dccp_hashinfo); out_free_dccp_ehash: free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); +out_free_bind2_bucket_cachep: + kmem_cache_destroy(dccp_hashinfo.bind2_bucket_cachep); out_free_bind_bucket_cachep: kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); out_free_hashinfo2: inet_hashinfo2_free_mod(&dccp_hashinfo); out_fail: dccp_hashinfo.bhash = NULL; + dccp_hashinfo.bhash2 = NULL; dccp_hashinfo.ehash = NULL; dccp_hashinfo.bind_bucket_cachep = NULL; + dccp_hashinfo.bind2_bucket_cachep = NULL; return rc; } static void __exit dccp_fini(void) { + int bhash_order = get_order(dccp_hashinfo.bhash_size * + sizeof(struct inet_bind_hashbucket)); + ccid_cleanup_builtins(); dccp_mib_exit(); - free_pages((unsigned long)dccp_hashinfo.bhash, - get_order(dccp_hashinfo.bhash_size * - sizeof(struct inet_bind_hashbucket))); + free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); + free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order); free_pages((unsigned long)dccp_hashinfo.ehash, get_order((dccp_hashinfo.ehash_mask + 1) * sizeof(struct inet_ehash_bucket))); diff --git a/net/decnet/Kconfig b/net/decnet/Kconfig deleted file mode 100644 index 24336bdb1054..000000000000 --- a/net/decnet/Kconfig +++ /dev/null @@ -1,43 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# DECnet configuration -# -config DECNET - tristate "DECnet Support" - help - The DECnet networking protocol was used in many products made by - Digital (now Compaq). It provides reliable stream and sequenced - packet communications over which run a variety of services similar - to those which run over TCP/IP. - - To find some tools to use with the kernel layer support, please - look at Patrick Caulfield's web site: - <http://linux-decnet.sourceforge.net/>. - - More detailed documentation is available in - <file:Documentation/networking/decnet.rst>. - - Be sure to say Y to "/proc file system support" and "Sysctl support" - below when using DECnet, since you will need sysctl support to aid - in configuration at run time. - - The DECnet code is also available as a module ( = code which can be - inserted in and removed from the running kernel whenever you want). - The module is called decnet. - -config DECNET_ROUTER - bool "DECnet: router support" - depends on DECNET - select FIB_RULES - help - Add support for turning your DECnet Endnode into a level 1 or 2 - router. This is an experimental, but functional option. If you - do say Y here, then make sure that you also say Y to "Kernel/User - network link driver", "Routing messages" and "Network packet - filtering". The first two are required to allow configuration via - rtnetlink (you will need Alexey Kuznetsov's iproute2 package - from <ftp://ftp.tux.org/pub/net/ip-routing/>). The "Network packet - filtering" option will be required for the forthcoming routing daemon - to work. - - See <file:Documentation/networking/decnet.rst> for more information. 
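Aside (illustrative, not part of the applied diff): the dccp_init()/dccp_fini() hunks above extend the usual goto-unwind idiom -- each new allocation (bind2_bucket_cachep, bhash2) gains an error label so failures release resources in reverse order of setup, and dccp_fini() frees bhash and bhash2 using the same bhash_order. A minimal sketch of that ordering; all names here (example_init, example_register, res_a, res_b) are hypothetical:

/* Sketch only -- all names are hypothetical. */
#include <linux/init.h>
#include <linux/slab.h>

static void *res_a;
static void *res_b;

static int example_register(void)	/* stands in for a later init step */
{
	return 0;
}

static int __init example_init(void)
{
	int rc = -ENOMEM;

	res_a = kzalloc(64, GFP_KERNEL);
	if (!res_a)
		goto out;

	res_b = kzalloc(64, GFP_KERNEL);	/* the newly added resource */
	if (!res_b)
		goto out_free_a;

	rc = example_register();
	if (rc)
		goto out_free_b;		/* unwind in reverse order of setup */
	return 0;

out_free_b:
	kfree(res_b);
	res_b = NULL;
out_free_a:
	kfree(res_a);
	res_a = NULL;
out:
	return rc;
}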
diff --git a/net/decnet/Makefile b/net/decnet/Makefile deleted file mode 100644 index 07b38e441b2d..000000000000 --- a/net/decnet/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 - -obj-$(CONFIG_DECNET) += decnet.o - -decnet-y := af_decnet.o dn_nsp_in.o dn_nsp_out.o \ - dn_route.o dn_dev.o dn_neigh.o dn_timer.o -decnet-$(CONFIG_DECNET_ROUTER) += dn_fib.o dn_rules.o dn_table.o -decnet-y += sysctl_net_decnet.o - -obj-$(CONFIG_NETFILTER) += netfilter/ diff --git a/net/decnet/README b/net/decnet/README deleted file mode 100644 index 60e7ec88c81f..000000000000 --- a/net/decnet/README +++ /dev/null @@ -1,8 +0,0 @@ - Linux DECnet Project - ====================== - -The documentation for this kernel subsystem is available in the -Documentation/networking subdirectory of this distribution and also -on line at http://www.chygwyn.com/DECnet/ - -Steve Whitehouse <SteveW@ACM.org> diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c deleted file mode 100644 index 6582dfdfb932..000000000000 --- a/net/decnet/af_decnet.c +++ /dev/null @@ -1,2404 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * DECnet Socket Layer Interface - * - * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com> - * Patrick Caulfield <patrick@pandh.demon.co.uk> - * - * Changes: - * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's - * version of the code. Original copyright preserved - * below. - * Steve Whitehouse: Some bug fixes, cleaning up some code to make it - * compatible with my routing layer. - * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick - * Caulfield. - * Steve Whitehouse: Further bug fixes, checking module code still works - * with new routing layer. - * Steve Whitehouse: Additional set/get_sockopt() calls. - * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new - * code. - * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like - * way. Didn't manage it entirely, but its better. - * Steve Whitehouse: ditto for sendmsg(). - * Steve Whitehouse: A selection of bug fixes to various things. - * Steve Whitehouse: Added TIOCOUTQ ioctl. - * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username. - * Steve Whitehouse: Fixes to connect() error returns. - * Patrick Caulfield: Fixes to delayed acceptance logic. - * David S. Miller: New socket locking - * Steve Whitehouse: Socket list hashing/locking - * Arnaldo C. Melo: use capable, not suser - * Steve Whitehouse: Removed unused code. Fix to use sk->allocation - * when required. - * Patrick Caulfield: /proc/net/decnet now has object name/number - * Steve Whitehouse: Fixed local port allocation, hashed sk list - * Matthew Wilcox: Fixes for dn_ioctl() - * Steve Whitehouse: New connect/accept logic to allow timeouts and - * prepare for sendpage etc. - */ - - -/****************************************************************************** - (c) 1995-1998 E.M. Serrat emserrat@geocities.com - - -HISTORY: - -Version Kernel Date Author/Comments -------- ------ ---- --------------- -Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat - (emserrat@geocities.com) - - First Development of DECnet Socket La- - yer for Linux. Only supports outgoing - connections. - -Version 0.0.2 2.1.105 20-jun-98 Patrick J. 
Caulfield - (patrick@pandh.demon.co.uk) - - Port to new kernel development version. - -Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat - (emserrat@geocities.com) - _ - Added support for incoming connections - so we can start developing server apps - on Linux. - - - Module Support -Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat - (emserrat@geocities.com) - _ - Added support for X11R6.4. Now we can - use DECnet transport for X on Linux!!! - - -Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat - (emserrat@geocities.com) - Removed bugs on flow control - Removed bugs on incoming accessdata - order - - -Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat - dn_recvmsg fixes - - Patrick J. Caulfield - dn_bind fixes -*******************************************************************************/ - -#include <linux/module.h> -#include <linux/errno.h> -#include <linux/types.h> -#include <linux/slab.h> -#include <linux/socket.h> -#include <linux/in.h> -#include <linux/kernel.h> -#include <linux/sched/signal.h> -#include <linux/timer.h> -#include <linux/string.h> -#include <linux/sockios.h> -#include <linux/net.h> -#include <linux/netdevice.h> -#include <linux/inet.h> -#include <linux/route.h> -#include <linux/netfilter.h> -#include <linux/seq_file.h> -#include <net/sock.h> -#include <net/tcp_states.h> -#include <net/flow.h> -#include <asm/ioctls.h> -#include <linux/capability.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/proc_fs.h> -#include <linux/stat.h> -#include <linux/init.h> -#include <linux/poll.h> -#include <linux/jiffies.h> -#include <net/net_namespace.h> -#include <net/neighbour.h> -#include <net/dst.h> -#include <net/fib_rules.h> -#include <net/tcp.h> -#include <net/dn.h> -#include <net/dn_nsp.h> -#include <net/dn_dev.h> -#include <net/dn_route.h> -#include <net/dn_fib.h> -#include <net/dn_neigh.h> - -struct dn_sock { - struct sock sk; - struct dn_scp scp; -}; - -static void dn_keepalive(struct sock *sk); - -#define DN_SK_HASH_SHIFT 8 -#define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT) -#define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1) - - -static const struct proto_ops dn_proto_ops; -static DEFINE_RWLOCK(dn_hash_lock); -static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; -static struct hlist_head dn_wild_sk; -static atomic_long_t decnet_memory_allocated; -static DEFINE_PER_CPU(int, decnet_memory_per_cpu_fw_alloc); - -static int __dn_setsockopt(struct socket *sock, int level, int optname, - sockptr_t optval, unsigned int optlen, int flags); -static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); - -static struct hlist_head *dn_find_list(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - - if (scp->addr.sdn_flags & SDF_WILD) - return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL; - - return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK]; -} - -/* - * Valid ports are those greater than zero and not already in use. 
- */ -static int check_port(__le16 port) -{ - struct sock *sk; - - if (port == 0) - return -1; - - sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) { - struct dn_scp *scp = DN_SK(sk); - if (scp->addrloc == port) - return -1; - } - return 0; -} - -static unsigned short port_alloc(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - static unsigned short port = 0x2000; - unsigned short i_port = port; - - while(check_port(cpu_to_le16(++port)) != 0) { - if (port == i_port) - return 0; - } - - scp->addrloc = cpu_to_le16(port); - - return 1; -} - -/* - * Since this is only ever called from user - * level, we don't need a write_lock() version - * of this. - */ -static int dn_hash_sock(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - struct hlist_head *list; - int rv = -EUSERS; - - BUG_ON(sk_hashed(sk)); - - write_lock_bh(&dn_hash_lock); - - if (!scp->addrloc && !port_alloc(sk)) - goto out; - - rv = -EADDRINUSE; - if ((list = dn_find_list(sk)) == NULL) - goto out; - - sk_add_node(sk, list); - rv = 0; -out: - write_unlock_bh(&dn_hash_lock); - return rv; -} - -static void dn_unhash_sock(struct sock *sk) -{ - write_lock(&dn_hash_lock); - sk_del_node_init(sk); - write_unlock(&dn_hash_lock); -} - -static void dn_unhash_sock_bh(struct sock *sk) -{ - write_lock_bh(&dn_hash_lock); - sk_del_node_init(sk); - write_unlock_bh(&dn_hash_lock); -} - -static struct hlist_head *listen_hash(struct sockaddr_dn *addr) -{ - int i; - unsigned int hash = addr->sdn_objnum; - - if (hash == 0) { - hash = addr->sdn_objnamel; - for(i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) { - hash ^= addr->sdn_objname[i]; - hash ^= (hash << 3); - } - } - - return &dn_sk_hash[hash & DN_SK_HASH_MASK]; -} - -/* - * Called to transform a socket from bound (i.e. with a local address) - * into a listening socket (doesn't need a local port number) and rehashes - * based upon the object name/number. - */ -static void dn_rehash_sock(struct sock *sk) -{ - struct hlist_head *list; - struct dn_scp *scp = DN_SK(sk); - - if (scp->addr.sdn_flags & SDF_WILD) - return; - - write_lock_bh(&dn_hash_lock); - sk_del_node_init(sk); - DN_SK(sk)->addrloc = 0; - list = listen_hash(&DN_SK(sk)->addr); - sk_add_node(sk, list); - write_unlock_bh(&dn_hash_lock); -} - -int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type) -{ - int len = 2; - - *buf++ = type; - - switch (type) { - case 0: - *buf++ = sdn->sdn_objnum; - break; - case 1: - *buf++ = 0; - *buf++ = le16_to_cpu(sdn->sdn_objnamel); - memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel)); - len = 3 + le16_to_cpu(sdn->sdn_objnamel); - break; - case 2: - memset(buf, 0, 5); - buf += 5; - *buf++ = le16_to_cpu(sdn->sdn_objnamel); - memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel)); - len = 7 + le16_to_cpu(sdn->sdn_objnamel); - break; - } - - return len; -} - -/* - * On reception of usernames, we handle types 1 and 0 for destination - * addresses only. Types 2 and 4 are used for source addresses, but the - * UIC, GIC are ignored and they are both treated the same way. Type 3 - * is never used as I've no idea what its purpose might be or what its - * format is. 
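dn_sockaddr2username() above emits three on-the-wire layouts: type 0 is the format byte plus the object number (2 bytes), type 1 is the format byte, a zero byte, a one-byte name length and the name (3 + namelen bytes), and type 2 pads with five zero bytes before the length (7 + namelen bytes). Below is a minimal user-space sketch of the type 1 case; put_username_type1() and the "MIRROR" object name are invented for the illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Type 1 username field: [0x01][0x00][namelen][name bytes ...] */
static int put_username_type1(uint8_t *buf, const char *objname)
{
	size_t namel = strlen(objname);

	buf[0] = 0x01;			/* format type */
	buf[1] = 0x00;			/* always zero for type 1 */
	buf[2] = (uint8_t)namel;	/* object name length */
	memcpy(buf + 3, objname, namel);
	return 3 + (int)namel;
}

int main(void)
{
	uint8_t buf[64];
	int i, len = put_username_type1(buf, "MIRROR");

	for (i = 0; i < len; i++)
		printf("%02x ", (unsigned)buf[i]);	/* 01 00 06 4d 49 52 52 4f 52 */
	printf("\n");
	return 0;
}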
- */ -int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt) -{ - unsigned char type; - int size = len; - int namel = 12; - - sdn->sdn_objnum = 0; - sdn->sdn_objnamel = cpu_to_le16(0); - memset(sdn->sdn_objname, 0, DN_MAXOBJL); - - if (len < 2) - return -1; - - len -= 2; - *fmt = *data++; - type = *data++; - - switch (*fmt) { - case 0: - sdn->sdn_objnum = type; - return 2; - case 1: - namel = 16; - break; - case 2: - len -= 4; - data += 4; - break; - case 4: - len -= 8; - data += 8; - break; - default: - return -1; - } - - len -= 1; - - if (len < 0) - return -1; - - sdn->sdn_objnamel = cpu_to_le16(*data++); - len -= le16_to_cpu(sdn->sdn_objnamel); - - if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel)) - return -1; - - memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel)); - - return size - len; -} - -struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr) -{ - struct hlist_head *list = listen_hash(addr); - struct sock *sk; - - read_lock(&dn_hash_lock); - sk_for_each(sk, list) { - struct dn_scp *scp = DN_SK(sk); - if (sk->sk_state != TCP_LISTEN) - continue; - if (scp->addr.sdn_objnum) { - if (scp->addr.sdn_objnum != addr->sdn_objnum) - continue; - } else { - if (addr->sdn_objnum) - continue; - if (scp->addr.sdn_objnamel != addr->sdn_objnamel) - continue; - if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0) - continue; - } - sock_hold(sk); - read_unlock(&dn_hash_lock); - return sk; - } - - sk = sk_head(&dn_wild_sk); - if (sk) { - if (sk->sk_state == TCP_LISTEN) - sock_hold(sk); - else - sk = NULL; - } - - read_unlock(&dn_hash_lock); - return sk; -} - -struct sock *dn_find_by_skb(struct sk_buff *skb) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - struct sock *sk; - struct dn_scp *scp; - - read_lock(&dn_hash_lock); - sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) { - scp = DN_SK(sk); - if (cb->src != dn_saddr2dn(&scp->peer)) - continue; - if (cb->dst_port != scp->addrloc) - continue; - if (scp->addrrem && (cb->src_port != scp->addrrem)) - continue; - sock_hold(sk); - goto found; - } - sk = NULL; -found: - read_unlock(&dn_hash_lock); - return sk; -} - - - -static void dn_destruct(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - - skb_queue_purge(&scp->data_xmit_queue); - skb_queue_purge(&scp->other_xmit_queue); - skb_queue_purge(&scp->other_receive_queue); - - dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); -} - -static unsigned long dn_memory_pressure; - -static void dn_enter_memory_pressure(struct sock *sk) -{ - if (!dn_memory_pressure) { - dn_memory_pressure = 1; - } -} - -static struct proto dn_proto = { - .name = "NSP", - .owner = THIS_MODULE, - .enter_memory_pressure = dn_enter_memory_pressure, - .memory_pressure = &dn_memory_pressure, - - .memory_allocated = &decnet_memory_allocated, - .per_cpu_fw_alloc = &decnet_memory_per_cpu_fw_alloc, - - .sysctl_mem = sysctl_decnet_mem, - .sysctl_wmem = sysctl_decnet_wmem, - .sysctl_rmem = sysctl_decnet_rmem, - .max_header = DN_MAX_NSP_DATA_HEADER + 64, - .obj_size = sizeof(struct dn_sock), -}; - -static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern) -{ - struct dn_scp *scp; - struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern); - - if (!sk) - goto out; - - if (sock) - sock->ops = &dn_proto_ops; - sock_init_data(sock, sk); - - sk->sk_backlog_rcv = dn_nsp_backlog_rcv; - sk->sk_destruct = dn_destruct; - sk->sk_no_check_tx = 1; - sk->sk_family = 
PF_DECnet; - sk->sk_protocol = 0; - sk->sk_allocation = gfp; - sk->sk_sndbuf = READ_ONCE(sysctl_decnet_wmem[1]); - sk->sk_rcvbuf = READ_ONCE(sysctl_decnet_rmem[1]); - - /* Initialization of DECnet Session Control Port */ - scp = DN_SK(sk); - scp->state = DN_O; /* Open */ - scp->numdat = 1; /* Next data seg to tx */ - scp->numoth = 1; /* Next oth data to tx */ - scp->ackxmt_dat = 0; /* Last data seg ack'ed */ - scp->ackxmt_oth = 0; /* Last oth data ack'ed */ - scp->ackrcv_dat = 0; /* Highest data ack recv*/ - scp->ackrcv_oth = 0; /* Last oth data ack rec*/ - scp->flowrem_sw = DN_SEND; - scp->flowloc_sw = DN_SEND; - scp->flowrem_dat = 0; - scp->flowrem_oth = 1; - scp->flowloc_dat = 0; - scp->flowloc_oth = 1; - scp->services_rem = 0; - scp->services_loc = 1 | NSP_FC_NONE; - scp->info_rem = 0; - scp->info_loc = 0x03; /* NSP version 4.1 */ - scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */ - scp->nonagle = 0; - scp->multi_ireq = 1; - scp->accept_mode = ACC_IMMED; - scp->addr.sdn_family = AF_DECnet; - scp->peer.sdn_family = AF_DECnet; - scp->accessdata.acc_accl = 5; - memcpy(scp->accessdata.acc_acc, "LINUX", 5); - - scp->max_window = NSP_MAX_WINDOW; - scp->snd_window = NSP_MIN_WINDOW; - scp->nsp_srtt = NSP_INITIAL_SRTT; - scp->nsp_rttvar = NSP_INITIAL_RTTVAR; - scp->nsp_rxtshift = 0; - - skb_queue_head_init(&scp->data_xmit_queue); - skb_queue_head_init(&scp->other_xmit_queue); - skb_queue_head_init(&scp->other_receive_queue); - - scp->persist = 0; - scp->persist_fxn = NULL; - scp->keepalive = 10 * HZ; - scp->keepalive_fxn = dn_keepalive; - - dn_start_slow_timer(sk); -out: - return sk; -} - -/* - * Keepalive timer. - * FIXME: Should respond to SO_KEEPALIVE etc. - */ -static void dn_keepalive(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - - /* - * By checking the other_data transmit queue is empty - * we are double checking that we are not sending too - * many of these keepalive frames. - */ - if (skb_queue_empty(&scp->other_xmit_queue)) - dn_nsp_send_link(sk, DN_NOCHANGE, 0); -} - - -/* - * Timer for shutdown/destroyed sockets. - * When socket is dead & no packets have been sent for a - * certain amount of time, they are removed by this - * routine. Also takes care of sending out DI & DC - * frames at correct times. 
- */ -int dn_destroy_timer(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - - scp->persist = dn_nsp_persist(sk); - - switch (scp->state) { - case DN_DI: - dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC); - if (scp->nsp_rxtshift >= decnet_di_count) - scp->state = DN_CN; - return 0; - - case DN_DR: - dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC); - if (scp->nsp_rxtshift >= decnet_dr_count) - scp->state = DN_DRC; - return 0; - - case DN_DN: - if (scp->nsp_rxtshift < decnet_dn_count) { - /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */ - dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, - GFP_ATOMIC); - return 0; - } - } - - scp->persist = (HZ * decnet_time_wait); - - if (sk->sk_socket) - return 0; - - if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) { - dn_unhash_sock(sk); - sock_put(sk); - return 1; - } - - return 0; -} - -static void dn_destroy_sock(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - - scp->nsp_rxtshift = 0; /* reset back off */ - - if (sk->sk_socket) { - if (sk->sk_socket->state != SS_UNCONNECTED) - sk->sk_socket->state = SS_DISCONNECTING; - } - - sk->sk_state = TCP_CLOSE; - - switch (scp->state) { - case DN_DN: - dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, - sk->sk_allocation); - scp->persist_fxn = dn_destroy_timer; - scp->persist = dn_nsp_persist(sk); - break; - case DN_CR: - scp->state = DN_DR; - goto disc_reject; - case DN_RUN: - scp->state = DN_DI; - fallthrough; - case DN_DI: - case DN_DR: -disc_reject: - dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation); - fallthrough; - case DN_NC: - case DN_NR: - case DN_RJ: - case DN_DIC: - case DN_CN: - case DN_DRC: - case DN_CI: - case DN_CD: - scp->persist_fxn = dn_destroy_timer; - scp->persist = dn_nsp_persist(sk); - break; - default: - printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n"); - fallthrough; - case DN_O: - dn_stop_slow_timer(sk); - - dn_unhash_sock_bh(sk); - sock_put(sk); - - break; - } -} - -char *dn_addr2asc(__u16 addr, char *buf) -{ - unsigned short node, area; - - node = addr & 0x03ff; - area = addr >> 10; - sprintf(buf, "%hd.%hd", area, node); - - return buf; -} - - - -static int dn_create(struct net *net, struct socket *sock, int protocol, - int kern) -{ - struct sock *sk; - - if (protocol < 0 || protocol > U8_MAX) - return -EINVAL; - - if (!net_eq(net, &init_net)) - return -EAFNOSUPPORT; - - switch (sock->type) { - case SOCK_SEQPACKET: - if (protocol != DNPROTO_NSP) - return -EPROTONOSUPPORT; - break; - case SOCK_STREAM: - break; - default: - return -ESOCKTNOSUPPORT; - } - - - if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL) - return -ENOBUFS; - - sk->sk_protocol = protocol; - - return 0; -} - - -static int -dn_release(struct socket *sock) -{ - struct sock *sk = sock->sk; - - if (sk) { - sock_orphan(sk); - sock_hold(sk); - lock_sock(sk); - dn_destroy_sock(sk); - release_sock(sk); - sock_put(sk); - } - - return 0; -} - -static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) -{ - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr; - struct net_device *dev, *ldev; - int rv; - - if (addr_len != sizeof(struct sockaddr_dn)) - return -EINVAL; - - if (saddr->sdn_family != AF_DECnet) - return -EINVAL; - - if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2)) - return -EINVAL; - - if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL) - return -EINVAL; - - if (saddr->sdn_flags & ~SDF_WILD) - return -EINVAL; - - if 
(!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum || - (saddr->sdn_flags & SDF_WILD))) - return -EACCES; - - if (!(saddr->sdn_flags & SDF_WILD)) { - if (le16_to_cpu(saddr->sdn_nodeaddrl)) { - rcu_read_lock(); - ldev = NULL; - for_each_netdev_rcu(&init_net, dev) { - if (!dev->dn_ptr) - continue; - if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) { - ldev = dev; - break; - } - } - rcu_read_unlock(); - if (ldev == NULL) - return -EADDRNOTAVAIL; - } - } - - rv = -EINVAL; - lock_sock(sk); - if (sock_flag(sk, SOCK_ZAPPED)) { - memcpy(&scp->addr, saddr, addr_len); - sock_reset_flag(sk, SOCK_ZAPPED); - - rv = dn_hash_sock(sk); - if (rv) - sock_set_flag(sk, SOCK_ZAPPED); - } - release_sock(sk); - - return rv; -} - - -static int dn_auto_bind(struct socket *sock) -{ - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - int rv; - - sock_reset_flag(sk, SOCK_ZAPPED); - - scp->addr.sdn_flags = 0; - scp->addr.sdn_objnum = 0; - - /* - * This stuff is to keep compatibility with Eduardo's - * patch. I hope I can dispense with it shortly... - */ - if ((scp->accessdata.acc_accl != 0) && - (scp->accessdata.acc_accl <= 12)) { - - scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl); - memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel)); - - scp->accessdata.acc_accl = 0; - memset(scp->accessdata.acc_acc, 0, 40); - } - /* End of compatibility stuff */ - - scp->addr.sdn_add.a_len = cpu_to_le16(2); - rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr); - if (rv == 0) { - rv = dn_hash_sock(sk); - if (rv) - sock_set_flag(sk, SOCK_ZAPPED); - } - - return rv; -} - -static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) -{ - struct dn_scp *scp = DN_SK(sk); - DEFINE_WAIT_FUNC(wait, woken_wake_function); - int err; - - if (scp->state != DN_CR) - return -EINVAL; - - scp->state = DN_CC; - scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk)); - dn_send_conn_conf(sk, allocation); - - add_wait_queue(sk_sleep(sk), &wait); - for(;;) { - release_sock(sk); - if (scp->state == DN_CC) - *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); - lock_sock(sk); - err = 0; - if (scp->state == DN_RUN) - break; - err = sock_error(sk); - if (err) - break; - err = sock_intr_errno(*timeo); - if (signal_pending(current)) - break; - err = -EAGAIN; - if (!*timeo) - break; - } - remove_wait_queue(sk_sleep(sk), &wait); - if (err == 0) { - sk->sk_socket->state = SS_CONNECTED; - } else if (scp->state != DN_CC) { - sk->sk_socket->state = SS_UNCONNECTED; - } - return err; -} - -static int dn_wait_run(struct sock *sk, long *timeo) -{ - struct dn_scp *scp = DN_SK(sk); - DEFINE_WAIT_FUNC(wait, woken_wake_function); - int err = 0; - - if (scp->state == DN_RUN) - goto out; - - if (!*timeo) - return -EALREADY; - - add_wait_queue(sk_sleep(sk), &wait); - for(;;) { - release_sock(sk); - if (scp->state == DN_CI || scp->state == DN_CC) - *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); - lock_sock(sk); - err = 0; - if (scp->state == DN_RUN) - break; - err = sock_error(sk); - if (err) - break; - err = sock_intr_errno(*timeo); - if (signal_pending(current)) - break; - err = -ETIMEDOUT; - if (!*timeo) - break; - } - remove_wait_queue(sk_sleep(sk), &wait); -out: - if (err == 0) { - sk->sk_socket->state = SS_CONNECTED; - } else if (scp->state != DN_CI && scp->state != DN_CC) { - sk->sk_socket->state = SS_UNCONNECTED; - } - return err; -} - -static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags) -{ - struct socket *sock 
= sk->sk_socket; - struct dn_scp *scp = DN_SK(sk); - int err = -EISCONN; - struct flowidn fld; - struct dst_entry *dst; - - if (sock->state == SS_CONNECTED) - goto out; - - if (sock->state == SS_CONNECTING) { - err = 0; - if (scp->state == DN_RUN) { - sock->state = SS_CONNECTED; - goto out; - } - err = -ECONNREFUSED; - if (scp->state != DN_CI && scp->state != DN_CC) { - sock->state = SS_UNCONNECTED; - goto out; - } - return dn_wait_run(sk, timeo); - } - - err = -EINVAL; - if (scp->state != DN_O) - goto out; - - if (addr == NULL || addrlen != sizeof(struct sockaddr_dn)) - goto out; - if (addr->sdn_family != AF_DECnet) - goto out; - if (addr->sdn_flags & SDF_WILD) - goto out; - - if (sock_flag(sk, SOCK_ZAPPED)) { - err = dn_auto_bind(sk->sk_socket); - if (err) - goto out; - } - - memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn)); - - err = -EHOSTUNREACH; - memset(&fld, 0, sizeof(fld)); - fld.flowidn_oif = sk->sk_bound_dev_if; - fld.daddr = dn_saddr2dn(&scp->peer); - fld.saddr = dn_saddr2dn(&scp->addr); - dn_sk_ports_copy(&fld, scp); - fld.flowidn_proto = DNPROTO_NSP; - if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0) - goto out; - dst = __sk_dst_get(sk); - sk->sk_route_caps = dst->dev->features; - sock->state = SS_CONNECTING; - scp->state = DN_CI; - scp->segsize_loc = dst_metric_advmss(dst); - - dn_nsp_send_conninit(sk, NSP_CI); - err = -EINPROGRESS; - if (*timeo) { - err = dn_wait_run(sk, timeo); - } -out: - return err; -} - -static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags) -{ - struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr; - struct sock *sk = sock->sk; - int err; - long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); - - lock_sock(sk); - err = __dn_connect(sk, addr, addrlen, &timeo, 0); - release_sock(sk); - - return err; -} - -static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags) -{ - struct dn_scp *scp = DN_SK(sk); - - switch (scp->state) { - case DN_RUN: - return 0; - case DN_CR: - return dn_confirm_accept(sk, timeo, sk->sk_allocation); - case DN_CI: - case DN_CC: - return dn_wait_run(sk, timeo); - case DN_O: - return __dn_connect(sk, addr, addrlen, timeo, flags); - } - - return -EINVAL; -} - - -static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc) -{ - unsigned char *ptr = skb->data; - - acc->acc_userl = *ptr++; - memcpy(&acc->acc_user, ptr, acc->acc_userl); - ptr += acc->acc_userl; - - acc->acc_passl = *ptr++; - memcpy(&acc->acc_pass, ptr, acc->acc_passl); - ptr += acc->acc_passl; - - acc->acc_accl = *ptr++; - memcpy(&acc->acc_acc, ptr, acc->acc_accl); - - skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3); - -} - -static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt) -{ - unsigned char *ptr = skb->data; - u16 len = *ptr++; /* yes, it's 8bit on the wire */ - - BUG_ON(len > 16); /* we've checked the contents earlier */ - opt->opt_optl = cpu_to_le16(len); - opt->opt_status = 0; - memcpy(opt->opt_data, ptr, len); - skb_pull(skb, len + 1); -} - -static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) -{ - DEFINE_WAIT_FUNC(wait, woken_wake_function); - struct sk_buff *skb = NULL; - int err = 0; - - add_wait_queue(sk_sleep(sk), &wait); - for(;;) { - release_sock(sk); - skb = skb_dequeue(&sk->sk_receive_queue); - if (skb == NULL) { - *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); - skb = skb_dequeue(&sk->sk_receive_queue); - } - lock_sock(sk); - if (skb != NULL) - break; - err = 
-EINVAL; - if (sk->sk_state != TCP_LISTEN) - break; - err = sock_intr_errno(*timeo); - if (signal_pending(current)) - break; - err = -EAGAIN; - if (!*timeo) - break; - } - remove_wait_queue(sk_sleep(sk), &wait); - - return skb == NULL ? ERR_PTR(err) : skb; -} - -static int dn_accept(struct socket *sock, struct socket *newsock, int flags, - bool kern) -{ - struct sock *sk = sock->sk, *newsk; - struct sk_buff *skb = NULL; - struct dn_skb_cb *cb; - unsigned char menuver; - int err = 0; - unsigned char type; - long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); - struct dst_entry *dst; - - lock_sock(sk); - - if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) { - release_sock(sk); - return -EINVAL; - } - - skb = skb_dequeue(&sk->sk_receive_queue); - if (skb == NULL) { - skb = dn_wait_for_connect(sk, &timeo); - if (IS_ERR(skb)) { - release_sock(sk); - return PTR_ERR(skb); - } - } - - cb = DN_SKB_CB(skb); - sk_acceptq_removed(sk); - newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern); - if (newsk == NULL) { - release_sock(sk); - kfree_skb(skb); - return -ENOBUFS; - } - release_sock(sk); - - dst = skb_dst(skb); - sk_dst_set(newsk, dst); - skb_dst_set(skb, NULL); - - DN_SK(newsk)->state = DN_CR; - DN_SK(newsk)->addrrem = cb->src_port; - DN_SK(newsk)->services_rem = cb->services; - DN_SK(newsk)->info_rem = cb->info; - DN_SK(newsk)->segsize_rem = cb->segsize; - DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode; - - if (DN_SK(newsk)->segsize_rem < 230) - DN_SK(newsk)->segsize_rem = 230; - - if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE) - DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd; - - newsk->sk_state = TCP_LISTEN; - memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn)); - - /* - * If we are listening on a wild socket, we don't want - * the newly created socket on the wrong hash queue. - */ - DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD; - - skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type)); - skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type)); - *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src; - *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst; - - menuver = *skb->data; - skb_pull(skb, 1); - - if (menuver & DN_MENUVER_ACC) - dn_access_copy(skb, &(DN_SK(newsk)->accessdata)); - - if (menuver & DN_MENUVER_USR) - dn_user_copy(skb, &(DN_SK(newsk)->conndata_in)); - - if (menuver & DN_MENUVER_PRX) - DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY; - - if (menuver & DN_MENUVER_UIC) - DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY; - - kfree_skb(skb); - - memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out), - sizeof(struct optdata_dn)); - memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out), - sizeof(struct optdata_dn)); - - lock_sock(newsk); - err = dn_hash_sock(newsk); - if (err == 0) { - sock_reset_flag(newsk, SOCK_ZAPPED); - dn_send_conn_ack(newsk); - - /* - * Here we use sk->sk_allocation since although the conn conf is - * for the newsk, the context is the old socket. 
- */ - if (DN_SK(newsk)->accept_mode == ACC_IMMED) - err = dn_confirm_accept(newsk, &timeo, - sk->sk_allocation); - } - release_sock(newsk); - return err; -} - - -static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer) -{ - struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr; - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - - lock_sock(sk); - - if (peer) { - if ((sock->state != SS_CONNECTED && - sock->state != SS_CONNECTING) && - scp->accept_mode == ACC_IMMED) { - release_sock(sk); - return -ENOTCONN; - } - - memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn)); - } else { - memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn)); - } - - release_sock(sk); - - return sizeof(struct sockaddr_dn); -} - - -static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait) -{ - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - __poll_t mask = datagram_poll(file, sock, wait); - - if (!skb_queue_empty_lockless(&scp->other_receive_queue)) - mask |= EPOLLRDBAND; - - return mask; -} - -static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) -{ - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - int err = -EOPNOTSUPP; - long amount = 0; - struct sk_buff *skb; - int val; - - switch(cmd) - { - case SIOCGIFADDR: - case SIOCSIFADDR: - return dn_dev_ioctl(cmd, (void __user *)arg); - - case SIOCATMARK: - lock_sock(sk); - val = !skb_queue_empty(&scp->other_receive_queue); - if (scp->state != DN_RUN) - val = -ENOTCONN; - release_sock(sk); - return val; - - case TIOCOUTQ: - amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); - if (amount < 0) - amount = 0; - err = put_user(amount, (int __user *)arg); - break; - - case TIOCINQ: - lock_sock(sk); - skb = skb_peek(&scp->other_receive_queue); - if (skb) { - amount = skb->len; - } else { - skb_queue_walk(&sk->sk_receive_queue, skb) - amount += skb->len; - } - release_sock(sk); - err = put_user(amount, (int __user *)arg); - break; - - default: - err = -ENOIOCTLCMD; - break; - } - - return err; -} - -static int dn_listen(struct socket *sock, int backlog) -{ - struct sock *sk = sock->sk; - int err = -EINVAL; - - lock_sock(sk); - - if (sock_flag(sk, SOCK_ZAPPED)) - goto out; - - if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN)) - goto out; - - sk->sk_max_ack_backlog = backlog; - sk->sk_ack_backlog = 0; - sk->sk_state = TCP_LISTEN; - err = 0; - dn_rehash_sock(sk); - -out: - release_sock(sk); - - return err; -} - - -static int dn_shutdown(struct socket *sock, int how) -{ - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - int err = -ENOTCONN; - - lock_sock(sk); - - if (sock->state == SS_UNCONNECTED) - goto out; - - err = 0; - if (sock->state == SS_DISCONNECTING) - goto out; - - err = -EINVAL; - if (scp->state == DN_O) - goto out; - - if (how != SHUT_RDWR) - goto out; - - sk->sk_shutdown = SHUTDOWN_MASK; - dn_destroy_sock(sk); - err = 0; - -out: - release_sock(sk); - - return err; -} - -static int dn_setsockopt(struct socket *sock, int level, int optname, - sockptr_t optval, unsigned int optlen) -{ - struct sock *sk = sock->sk; - int err; - - lock_sock(sk); - err = __dn_setsockopt(sock, level, optname, optval, optlen, 0); - release_sock(sk); -#ifdef CONFIG_NETFILTER - /* we need to exclude all possible ENOPROTOOPTs except default case */ - if (err == -ENOPROTOOPT && optname != DSO_LINKINFO && - optname != DSO_STREAM && optname != DSO_SEQPACKET) - err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); -#endif - - return err; -} - -static int 
__dn_setsockopt(struct socket *sock, int level, int optname, - sockptr_t optval, unsigned int optlen, int flags) -{ - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - long timeo; - union { - struct optdata_dn opt; - struct accessdata_dn acc; - int mode; - unsigned long win; - int val; - unsigned char services; - unsigned char info; - } u; - int err; - - if (optlen && sockptr_is_null(optval)) - return -EINVAL; - - if (optlen > sizeof(u)) - return -EINVAL; - - if (copy_from_sockptr(&u, optval, optlen)) - return -EFAULT; - - switch (optname) { - case DSO_CONDATA: - if (sock->state == SS_CONNECTED) - return -EISCONN; - if ((scp->state != DN_O) && (scp->state != DN_CR)) - return -EINVAL; - - if (optlen != sizeof(struct optdata_dn)) - return -EINVAL; - - if (le16_to_cpu(u.opt.opt_optl) > 16) - return -EINVAL; - - memcpy(&scp->conndata_out, &u.opt, optlen); - break; - - case DSO_DISDATA: - if (sock->state != SS_CONNECTED && - scp->accept_mode == ACC_IMMED) - return -ENOTCONN; - - if (optlen != sizeof(struct optdata_dn)) - return -EINVAL; - - if (le16_to_cpu(u.opt.opt_optl) > 16) - return -EINVAL; - - memcpy(&scp->discdata_out, &u.opt, optlen); - break; - - case DSO_CONACCESS: - if (sock->state == SS_CONNECTED) - return -EISCONN; - if (scp->state != DN_O) - return -EINVAL; - - if (optlen != sizeof(struct accessdata_dn)) - return -EINVAL; - - if ((u.acc.acc_accl > DN_MAXACCL) || - (u.acc.acc_passl > DN_MAXACCL) || - (u.acc.acc_userl > DN_MAXACCL)) - return -EINVAL; - - memcpy(&scp->accessdata, &u.acc, optlen); - break; - - case DSO_ACCEPTMODE: - if (sock->state == SS_CONNECTED) - return -EISCONN; - if (scp->state != DN_O) - return -EINVAL; - - if (optlen != sizeof(int)) - return -EINVAL; - - if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER)) - return -EINVAL; - - scp->accept_mode = (unsigned char)u.mode; - break; - - case DSO_CONACCEPT: - if (scp->state != DN_CR) - return -EINVAL; - timeo = sock_rcvtimeo(sk, 0); - err = dn_confirm_accept(sk, &timeo, sk->sk_allocation); - return err; - - case DSO_CONREJECT: - if (scp->state != DN_CR) - return -EINVAL; - - scp->state = DN_DR; - sk->sk_shutdown = SHUTDOWN_MASK; - dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation); - break; - - case DSO_MAXWINDOW: - if (optlen != sizeof(unsigned long)) - return -EINVAL; - if (u.win > NSP_MAX_WINDOW) - u.win = NSP_MAX_WINDOW; - if (u.win == 0) - return -EINVAL; - scp->max_window = u.win; - if (scp->snd_window > u.win) - scp->snd_window = u.win; - break; - - case DSO_NODELAY: - if (optlen != sizeof(int)) - return -EINVAL; - if (scp->nonagle == TCP_NAGLE_CORK) - return -EINVAL; - scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_OFF; - /* if (scp->nonagle == 1) { Push pending frames } */ - break; - - case DSO_CORK: - if (optlen != sizeof(int)) - return -EINVAL; - if (scp->nonagle == TCP_NAGLE_OFF) - return -EINVAL; - scp->nonagle = (u.val == 0) ? 
0 : TCP_NAGLE_CORK; - /* if (scp->nonagle == 0) { Push pending frames } */ - break; - - case DSO_SERVICES: - if (optlen != sizeof(unsigned char)) - return -EINVAL; - if ((u.services & ~NSP_FC_MASK) != 0x01) - return -EINVAL; - if ((u.services & NSP_FC_MASK) == NSP_FC_MASK) - return -EINVAL; - scp->services_loc = u.services; - break; - - case DSO_INFO: - if (optlen != sizeof(unsigned char)) - return -EINVAL; - if (u.info & 0xfc) - return -EINVAL; - scp->info_loc = u.info; - break; - - case DSO_LINKINFO: - case DSO_STREAM: - case DSO_SEQPACKET: - default: - return -ENOPROTOOPT; - } - - return 0; -} - -static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) -{ - struct sock *sk = sock->sk; - int err; - - lock_sock(sk); - err = __dn_getsockopt(sock, level, optname, optval, optlen, 0); - release_sock(sk); -#ifdef CONFIG_NETFILTER - if (err == -ENOPROTOOPT && optname != DSO_STREAM && - optname != DSO_SEQPACKET && optname != DSO_CONACCEPT && - optname != DSO_CONREJECT) { - int len; - - if (get_user(len, optlen)) - return -EFAULT; - - err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len); - if (err >= 0) - err = put_user(len, optlen); - } -#endif - - return err; -} - -static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags) -{ - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - struct linkinfo_dn link; - unsigned int r_len; - void *r_data = NULL; - unsigned int val; - - if(get_user(r_len , optlen)) - return -EFAULT; - - switch (optname) { - case DSO_CONDATA: - if (r_len > sizeof(struct optdata_dn)) - r_len = sizeof(struct optdata_dn); - r_data = &scp->conndata_in; - break; - - case DSO_DISDATA: - if (r_len > sizeof(struct optdata_dn)) - r_len = sizeof(struct optdata_dn); - r_data = &scp->discdata_in; - break; - - case DSO_CONACCESS: - if (r_len > sizeof(struct accessdata_dn)) - r_len = sizeof(struct accessdata_dn); - r_data = &scp->accessdata; - break; - - case DSO_ACCEPTMODE: - if (r_len > sizeof(unsigned char)) - r_len = sizeof(unsigned char); - r_data = &scp->accept_mode; - break; - - case DSO_LINKINFO: - if (r_len > sizeof(struct linkinfo_dn)) - r_len = sizeof(struct linkinfo_dn); - - memset(&link, 0, sizeof(link)); - - switch (sock->state) { - case SS_CONNECTING: - link.idn_linkstate = LL_CONNECTING; - break; - case SS_DISCONNECTING: - link.idn_linkstate = LL_DISCONNECTING; - break; - case SS_CONNECTED: - link.idn_linkstate = LL_RUNNING; - break; - default: - link.idn_linkstate = LL_INACTIVE; - } - - link.idn_segsize = scp->segsize_rem; - r_data = &link; - break; - - case DSO_MAXWINDOW: - if (r_len > sizeof(unsigned long)) - r_len = sizeof(unsigned long); - r_data = &scp->max_window; - break; - - case DSO_NODELAY: - if (r_len > sizeof(int)) - r_len = sizeof(int); - val = (scp->nonagle == TCP_NAGLE_OFF); - r_data = &val; - break; - - case DSO_CORK: - if (r_len > sizeof(int)) - r_len = sizeof(int); - val = (scp->nonagle == TCP_NAGLE_CORK); - r_data = &val; - break; - - case DSO_SERVICES: - if (r_len > sizeof(unsigned char)) - r_len = sizeof(unsigned char); - r_data = &scp->services_rem; - break; - - case DSO_INFO: - if (r_len > sizeof(unsigned char)) - r_len = sizeof(unsigned char); - r_data = &scp->info_rem; - break; - - case DSO_STREAM: - case DSO_SEQPACKET: - case DSO_CONACCEPT: - case DSO_CONREJECT: - default: - return -ENOPROTOOPT; - } - - if (r_data) { - if (copy_to_user(optval, r_data, r_len)) - return -EFAULT; - if (put_user(r_len, optlen)) - return 
-EFAULT; - } - - return 0; -} - - -static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target) -{ - struct sk_buff *skb; - int len = 0; - - if (flags & MSG_OOB) - return !skb_queue_empty(q) ? 1 : 0; - - skb_queue_walk(q, skb) { - struct dn_skb_cb *cb = DN_SKB_CB(skb); - len += skb->len; - - if (cb->nsp_flags & 0x40) { - /* SOCK_SEQPACKET reads to EOM */ - if (sk->sk_type == SOCK_SEQPACKET) - return 1; - /* so does SOCK_STREAM unless WAITALL is specified */ - if (!(flags & MSG_WAITALL)) - return 1; - } - - /* minimum data length for read exceeded */ - if (len >= target) - return 1; - } - - return 0; -} - - -static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, - int flags) -{ - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - struct sk_buff_head *queue = &sk->sk_receive_queue; - size_t target = size > 1 ? 1 : 0; - size_t copied = 0; - int rv = 0; - struct sk_buff *skb, *n; - struct dn_skb_cb *cb = NULL; - unsigned char eor = 0; - long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); - - lock_sock(sk); - - if (sock_flag(sk, SOCK_ZAPPED)) { - rv = -EADDRNOTAVAIL; - goto out; - } - - if (sk->sk_shutdown & RCV_SHUTDOWN) { - rv = 0; - goto out; - } - - rv = dn_check_state(sk, NULL, 0, &timeo, flags); - if (rv) - goto out; - - if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) { - rv = -EOPNOTSUPP; - goto out; - } - - if (flags & MSG_OOB) - queue = &scp->other_receive_queue; - - if (flags & MSG_WAITALL) - target = size; - - - /* - * See if there is data ready to read, sleep if there isn't - */ - for(;;) { - DEFINE_WAIT_FUNC(wait, woken_wake_function); - - if (sk->sk_err) - goto out; - - if (!skb_queue_empty(&scp->other_receive_queue)) { - if (!(flags & MSG_OOB)) { - msg->msg_flags |= MSG_OOB; - if (!scp->other_report) { - scp->other_report = 1; - goto out; - } - } - } - - if (scp->state != DN_RUN) - goto out; - - if (signal_pending(current)) { - rv = sock_intr_errno(timeo); - goto out; - } - - if (dn_data_ready(sk, queue, flags, target)) - break; - - if (flags & MSG_DONTWAIT) { - rv = -EWOULDBLOCK; - goto out; - } - - add_wait_queue(sk_sleep(sk), &wait); - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); - sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait); - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); - remove_wait_queue(sk_sleep(sk), &wait); - } - - skb_queue_walk_safe(queue, skb, n) { - unsigned int chunk = skb->len; - cb = DN_SKB_CB(skb); - - if ((chunk + copied) > size) - chunk = size - copied; - - if (memcpy_to_msg(msg, skb->data, chunk)) { - rv = -EFAULT; - break; - } - copied += chunk; - - if (!(flags & MSG_PEEK)) - skb_pull(skb, chunk); - - eor = cb->nsp_flags & 0x40; - - if (skb->len == 0) { - skb_unlink(skb, queue); - kfree_skb(skb); - /* - * N.B. Don't refer to skb or cb after this point - * in loop. - */ - if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) { - scp->flowloc_sw = DN_SEND; - dn_nsp_send_link(sk, DN_SEND, 0); - } - } - - if (eor) { - if (sk->sk_type == SOCK_SEQPACKET) - break; - if (!(flags & MSG_WAITALL)) - break; - } - - if (flags & MSG_OOB) - break; - - if (copied >= target) - break; - } - - rv = copied; - - - if (eor && (sk->sk_type == SOCK_SEQPACKET)) - msg->msg_flags |= MSG_EOR; - -out: - if (rv == 0) - rv = (flags & MSG_PEEK) ? 
-sk->sk_err : sock_error(sk); - - if ((rv >= 0) && msg->msg_name) { - __sockaddr_check_size(sizeof(struct sockaddr_dn)); - memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn)); - msg->msg_namelen = sizeof(struct sockaddr_dn); - } - - release_sock(sk); - - return rv; -} - - -static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags) -{ - unsigned char fctype = scp->services_rem & NSP_FC_MASK; - if (skb_queue_len(queue) >= scp->snd_window) - return 1; - if (fctype != NSP_FC_NONE) { - if (flags & MSG_OOB) { - if (scp->flowrem_oth == 0) - return 1; - } else { - if (scp->flowrem_dat == 0) - return 1; - } - } - return 0; -} - -/* - * The DECnet spec requires that the "routing layer" accepts packets which - * are at least 230 bytes in size. This excludes any headers which the NSP - * layer might add, so we always assume that we'll be using the maximal - * length header on data packets. The variation in length is due to the - * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't - * make much practical difference. - */ -unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu) -{ - unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER; - if (dev) { - struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); - mtu -= LL_RESERVED_SPACE(dev); - if (dn_db->use_long) - mtu -= 21; - else - mtu -= 6; - mtu -= DN_MAX_NSP_DATA_HEADER; - } else { - /* - * 21 = long header, 16 = guess at MAC header length - */ - mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16); - } - if (mtu > mss) - mss = mtu; - return mss; -} - -static inline unsigned int dn_current_mss(struct sock *sk, int flags) -{ - struct dst_entry *dst = __sk_dst_get(sk); - struct dn_scp *scp = DN_SK(sk); - int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem); - - /* Other data messages are limited to 16 bytes per packet */ - if (flags & MSG_OOB) - return 16; - - /* This works out the maximum size of segment we can send out */ - if (dst) { - u32 mtu = dst_mtu(dst); - mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now); - } - - return mss_now; -} - -/* - * N.B. We get the timeout wrong here, but then we always did get it - * wrong before and this is another step along the road to correcting - * it. It ought to get updated each time we pass through the routine, - * but in practise it probably doesn't matter too much for now. 
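The 230-byte rule described in the dn_mss_from_pmtu() comment above means the data MSS is effectively the larger of the path-MTU-derived payload and 230 minus the maximal NSP data header. A rough user-space restatement follows; dn_data_mss() is invented for the illustration, and the header sizes are passed in as parameters because DN_MAX_NSP_DATA_HEADER and the per-device link overhead are defined in headers outside this hunk (the 21- and 16-byte figures echo the "long header" and MAC-length guess in the comment, while the 11-byte NSP header below is only an assumed placeholder).

#include <stdio.h>

/* MSS = path-MTU-derived payload, but never below the 230-byte minimum the
 * DECnet routing layer must accept, less the maximal NSP data header. */
static unsigned int dn_data_mss(unsigned int mtu,
				unsigned int link_overhead,
				unsigned int max_nsp_hdr)
{
	unsigned int floor_mss = 230 - max_nsp_hdr;
	unsigned int mss = mtu - link_overhead - max_nsp_hdr;

	return mss > floor_mss ? mss : floor_mss;
}

int main(void)
{
	printf("%u\n", dn_data_mss(1500, 21 + 16, 11));	/* big MTU: PMTU-derived value wins */
	printf("%u\n", dn_data_mss(230, 21 + 16, 11));	/* tiny MTU: the 230-byte floor wins */
	return 0;
}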
- */ -static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk, - unsigned long datalen, int noblock, - int *errcode) -{ - struct sk_buff *skb = sock_alloc_send_skb(sk, datalen, - noblock, errcode); - if (skb) { - skb->protocol = htons(ETH_P_DNA_RT); - skb->pkt_type = PACKET_OUTGOING; - } - return skb; -} - -static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) -{ - struct sock *sk = sock->sk; - struct dn_scp *scp = DN_SK(sk); - size_t mss; - struct sk_buff_head *queue = &scp->data_xmit_queue; - int flags = msg->msg_flags; - int err = 0; - size_t sent = 0; - int addr_len = msg->msg_namelen; - DECLARE_SOCKADDR(struct sockaddr_dn *, addr, msg->msg_name); - struct sk_buff *skb = NULL; - struct dn_skb_cb *cb; - size_t len; - unsigned char fctype; - long timeo; - - if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT)) - return -EOPNOTSUPP; - - if (addr_len && (addr_len != sizeof(struct sockaddr_dn))) - return -EINVAL; - - lock_sock(sk); - timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); - /* - * The only difference between stream sockets and sequenced packet - * sockets is that the stream sockets always behave as if MSG_EOR - * has been set. - */ - if (sock->type == SOCK_STREAM) { - if (flags & MSG_EOR) { - err = -EINVAL; - goto out; - } - flags |= MSG_EOR; - } - - - err = dn_check_state(sk, addr, addr_len, &timeo, flags); - if (err) - goto out_err; - - if (sk->sk_shutdown & SEND_SHUTDOWN) { - err = -EPIPE; - if (!(flags & MSG_NOSIGNAL)) - send_sig(SIGPIPE, current, 0); - goto out_err; - } - - if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) - dst_negative_advice(sk); - - mss = scp->segsize_rem; - fctype = scp->services_rem & NSP_FC_MASK; - - mss = dn_current_mss(sk, flags); - - if (flags & MSG_OOB) { - queue = &scp->other_xmit_queue; - if (size > mss) { - err = -EMSGSIZE; - goto out; - } - } - - scp->persist_fxn = dn_nsp_xmit_timeout; - - while(sent < size) { - err = sock_error(sk); - if (err) - goto out; - - if (signal_pending(current)) { - err = sock_intr_errno(timeo); - goto out; - } - - /* - * Calculate size that we wish to send. - */ - len = size - sent; - - if (len > mss) - len = mss; - - /* - * Wait for queue size to go down below the window - * size. - */ - if (dn_queue_too_long(scp, queue, flags)) { - DEFINE_WAIT_FUNC(wait, woken_wake_function); - - if (flags & MSG_DONTWAIT) { - err = -EWOULDBLOCK; - goto out; - } - - add_wait_queue(sk_sleep(sk), &wait); - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); - sk_wait_event(sk, &timeo, - !dn_queue_too_long(scp, queue, flags), &wait); - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); - remove_wait_queue(sk_sleep(sk), &wait); - continue; - } - - /* - * Get a suitably sized skb. - * 64 is a bit of a hack really, but its larger than any - * link-layer headers and has served us well as a good - * guess as to their real length. 
- */ - skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER, - flags & MSG_DONTWAIT, &err); - - if (err) - break; - - if (!skb) - continue; - - cb = DN_SKB_CB(skb); - - skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER); - - if (memcpy_from_msg(skb_put(skb, len), msg, len)) { - err = -EFAULT; - goto out; - } - - if (flags & MSG_OOB) { - cb->nsp_flags = 0x30; - if (fctype != NSP_FC_NONE) - scp->flowrem_oth--; - } else { - cb->nsp_flags = 0x00; - if (scp->seg_total == 0) - cb->nsp_flags |= 0x20; - - scp->seg_total += len; - - if (((sent + len) == size) && (flags & MSG_EOR)) { - cb->nsp_flags |= 0x40; - scp->seg_total = 0; - if (fctype == NSP_FC_SCMC) - scp->flowrem_dat--; - } - if (fctype == NSP_FC_SRC) - scp->flowrem_dat--; - } - - sent += len; - dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB); - skb = NULL; - - scp->persist = dn_nsp_persist(sk); - - } -out: - - kfree_skb(skb); - - release_sock(sk); - - return sent ? sent : err; - -out_err: - err = sk_stream_error(sk, flags, err); - release_sock(sk); - return err; -} - -static int dn_device_event(struct notifier_block *this, unsigned long event, - void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_UP: - dn_dev_up(dev); - break; - case NETDEV_DOWN: - dn_dev_down(dev); - break; - default: - break; - } - - return NOTIFY_DONE; -} - -static struct notifier_block dn_dev_notifier = { - .notifier_call = dn_device_event, -}; - -static struct packet_type dn_dix_packet_type __read_mostly = { - .type = cpu_to_be16(ETH_P_DNA_RT), - .func = dn_route_rcv, -}; - -#ifdef CONFIG_PROC_FS -struct dn_iter_state { - int bucket; -}; - -static struct sock *dn_socket_get_first(struct seq_file *seq) -{ - struct dn_iter_state *state = seq->private; - struct sock *n = NULL; - - for(state->bucket = 0; - state->bucket < DN_SK_HASH_SIZE; - ++state->bucket) { - n = sk_head(&dn_sk_hash[state->bucket]); - if (n) - break; - } - - return n; -} - -static struct sock *dn_socket_get_next(struct seq_file *seq, - struct sock *n) -{ - struct dn_iter_state *state = seq->private; - - n = sk_next(n); - while (!n) { - if (++state->bucket >= DN_SK_HASH_SIZE) - break; - n = sk_head(&dn_sk_hash[state->bucket]); - } - return n; -} - -static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos) -{ - struct sock *sk = dn_socket_get_first(seq); - - if (sk) { - while(*pos && (sk = dn_socket_get_next(seq, sk))) - --*pos; - } - return *pos ? NULL : sk; -} - -static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos) -{ - void *rc; - read_lock_bh(&dn_hash_lock); - rc = socket_get_idx(seq, &pos); - if (!rc) { - read_unlock_bh(&dn_hash_lock); - } - return rc; -} - -static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos) -{ - return *pos ? 
dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; -} - -static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - void *rc; - - if (v == SEQ_START_TOKEN) { - rc = dn_socket_get_idx(seq, 0); - goto out; - } - - rc = dn_socket_get_next(seq, v); - if (rc) - goto out; - read_unlock_bh(&dn_hash_lock); -out: - ++*pos; - return rc; -} - -static void dn_socket_seq_stop(struct seq_file *seq, void *v) -{ - if (v && v != SEQ_START_TOKEN) - read_unlock_bh(&dn_hash_lock); -} - -#define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126) - -static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf) -{ - int i; - - switch (le16_to_cpu(dn->sdn_objnamel)) { - case 0: - sprintf(buf, "%d", dn->sdn_objnum); - break; - default: - for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) { - buf[i] = dn->sdn_objname[i]; - if (IS_NOT_PRINTABLE(buf[i])) - buf[i] = '.'; - } - buf[i] = 0; - } -} - -static char *dn_state2asc(unsigned char state) -{ - switch (state) { - case DN_O: - return "OPEN"; - case DN_CR: - return " CR"; - case DN_DR: - return " DR"; - case DN_DRC: - return " DRC"; - case DN_CC: - return " CC"; - case DN_CI: - return " CI"; - case DN_NR: - return " NR"; - case DN_NC: - return " NC"; - case DN_CD: - return " CD"; - case DN_RJ: - return " RJ"; - case DN_RUN: - return " RUN"; - case DN_DI: - return " DI"; - case DN_DIC: - return " DIC"; - case DN_DN: - return " DN"; - case DN_CL: - return " CL"; - case DN_CN: - return " CN"; - } - - return "????"; -} - -static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - char buf1[DN_ASCBUF_LEN]; - char buf2[DN_ASCBUF_LEN]; - char local_object[DN_MAXOBJL+3]; - char remote_object[DN_MAXOBJL+3]; - - dn_printable_object(&scp->addr, local_object); - dn_printable_object(&scp->peer, remote_object); - - seq_printf(seq, - "%6s/%04X %04d:%04d %04d:%04d %01d %-16s " - "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n", - dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1), - scp->addrloc, - scp->numdat, - scp->numoth, - scp->ackxmt_dat, - scp->ackxmt_oth, - scp->flowloc_sw, - local_object, - dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2), - scp->addrrem, - scp->numdat_rcv, - scp->numoth_rcv, - scp->ackrcv_dat, - scp->ackrcv_oth, - scp->flowrem_sw, - remote_object, - dn_state2asc(scp->state), - ((scp->accept_mode == ACC_IMMED) ? 
"IMMED" : "DEFER")); -} - -static int dn_socket_seq_show(struct seq_file *seq, void *v) -{ - if (v == SEQ_START_TOKEN) { - seq_puts(seq, "Local Remote\n"); - } else { - dn_socket_format_entry(seq, v); - } - return 0; -} - -static const struct seq_operations dn_socket_seq_ops = { - .start = dn_socket_seq_start, - .next = dn_socket_seq_next, - .stop = dn_socket_seq_stop, - .show = dn_socket_seq_show, -}; -#endif - -static const struct net_proto_family dn_family_ops = { - .family = AF_DECnet, - .create = dn_create, - .owner = THIS_MODULE, -}; - -static const struct proto_ops dn_proto_ops = { - .family = AF_DECnet, - .owner = THIS_MODULE, - .release = dn_release, - .bind = dn_bind, - .connect = dn_connect, - .socketpair = sock_no_socketpair, - .accept = dn_accept, - .getname = dn_getname, - .poll = dn_poll, - .ioctl = dn_ioctl, - .listen = dn_listen, - .shutdown = dn_shutdown, - .setsockopt = dn_setsockopt, - .getsockopt = dn_getsockopt, - .sendmsg = dn_sendmsg, - .recvmsg = dn_recvmsg, - .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, -}; - -MODULE_DESCRIPTION("The Linux DECnet Network Protocol"); -MODULE_AUTHOR("Linux DECnet Project Team"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NETPROTO(PF_DECnet); - -static const char banner[] __initconst = KERN_INFO -"NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n"; - -static int __init decnet_init(void) -{ - int rc; - - printk(banner); - - rc = proto_register(&dn_proto, 1); - if (rc != 0) - goto out; - - dn_neigh_init(); - dn_dev_init(); - dn_route_init(); - dn_fib_init(); - - sock_register(&dn_family_ops); - dev_add_pack(&dn_dix_packet_type); - register_netdevice_notifier(&dn_dev_notifier); - - proc_create_seq_private("decnet", 0444, init_net.proc_net, - &dn_socket_seq_ops, sizeof(struct dn_iter_state), - NULL); - dn_register_sysctl(); -out: - return rc; - -} -module_init(decnet_init); - -/* - * Prevent DECnet module unloading until its fixed properly. - * Requires an audit of the code to check for memory leaks and - * initialisation problems etc. - */ -#if 0 -static void __exit decnet_exit(void) -{ - sock_unregister(AF_DECnet); - rtnl_unregister_all(PF_DECnet); - dev_remove_pack(&dn_dix_packet_type); - - dn_unregister_sysctl(); - - unregister_netdevice_notifier(&dn_dev_notifier); - - dn_route_cleanup(); - dn_dev_cleanup(); - dn_neigh_cleanup(); - dn_fib_cleanup(); - - remove_proc_entry("decnet", init_net.proc_net); - - proto_unregister(&dn_proto); - - rcu_barrier(); /* Wait for completion of call_rcu()'s */ -} -module_exit(decnet_exit); -#endif diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c deleted file mode 100644 index a09ba642b5e7..000000000000 --- a/net/decnet/dn_dev.c +++ /dev/null @@ -1,1433 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * DECnet Device Layer - * - * Authors: Steve Whitehouse <SteveW@ACM.org> - * Eduardo Marcelo Serrat <emserrat@geocities.com> - * - * Changes: - * Steve Whitehouse : Devices now see incoming frames so they - * can mark on who it came from. - * Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour - * can now have a device specific setup func. 
- * Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/ - * Steve Whitehouse : Fixed bug which sometimes killed timer - * Steve Whitehouse : Multiple ifaddr support - * Steve Whitehouse : SIOCGIFCONF is now a compile time option - * Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding - * Steve Whitehouse : Removed timer1 - it's a user space issue now - * Patrick Caulfield : Fixed router hello message format - * Steve Whitehouse : Got rid of constant sizes for blksize for - * devices. All mtu based now. - */ - -#include <linux/capability.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/init.h> -#include <linux/net.h> -#include <linux/netdevice.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/timer.h> -#include <linux/string.h> -#include <linux/if_addr.h> -#include <linux/if_arp.h> -#include <linux/if_ether.h> -#include <linux/skbuff.h> -#include <linux/sysctl.h> -#include <linux/notifier.h> -#include <linux/slab.h> -#include <linux/jiffies.h> -#include <linux/uaccess.h> -#include <net/net_namespace.h> -#include <net/neighbour.h> -#include <net/dst.h> -#include <net/flow.h> -#include <net/fib_rules.h> -#include <net/netlink.h> -#include <net/dn.h> -#include <net/dn_dev.h> -#include <net/dn_route.h> -#include <net/dn_neigh.h> -#include <net/dn_fib.h> - -#define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn)) - -static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; -static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00}; -static char dn_hiord[ETH_ALEN] = {0xAA,0x00,0x04,0x00,0x00,0x00}; -static unsigned char dn_eco_version[3] = {0x02,0x00,0x00}; - -extern struct neigh_table dn_neigh_table; - -/* - * decnet_address is kept in network order. 
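decnet_address, like every node address in this code, packs a 6-bit area into the top of a 16-bit value and a 10-bit node number into the bottom (see dn_addr2asc() in the af_decnet.c hunk above); the __le16 type records that the value is stored little-endian on the wire. A small standalone illustration, with dn_make_addr() invented for the example and the value shown in host order:

#include <stdio.h>
#include <stdint.h>

/* DECnet node address: top 6 bits = area, low 10 bits = node. */
static uint16_t dn_make_addr(unsigned int area, unsigned int node)
{
	return (uint16_t)((area << 10) | (node & 0x03ff));
}

int main(void)
{
	uint16_t addr = dn_make_addr(1, 2);		/* address "1.2" */

	printf("%u.%u -> 0x%04x\n", (unsigned)(addr >> 10),
	       (unsigned)(addr & 0x03ff), (unsigned)addr);
	/* prints: 1.2 -> 0x0402 */
	return 0;
}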
- */ -__le16 decnet_address = 0; - -static DEFINE_SPINLOCK(dndev_lock); -static struct net_device *decnet_default_device; -static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); - -static struct dn_dev *dn_dev_create(struct net_device *dev, int *err); -static void dn_dev_delete(struct net_device *dev); -static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa); - -static int dn_eth_up(struct net_device *); -static void dn_eth_down(struct net_device *); -static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa); -static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa); - -static struct dn_dev_parms dn_dev_list[] = { -{ - .type = ARPHRD_ETHER, /* Ethernet */ - .mode = DN_DEV_BCAST, - .state = DN_DEV_S_RU, - .t2 = 1, - .t3 = 10, - .name = "ethernet", - .up = dn_eth_up, - .down = dn_eth_down, - .timer3 = dn_send_brd_hello, -}, -{ - .type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */ - .mode = DN_DEV_BCAST, - .state = DN_DEV_S_RU, - .t2 = 1, - .t3 = 10, - .name = "ipgre", - .timer3 = dn_send_brd_hello, -}, -#if 0 -{ - .type = ARPHRD_X25, /* Bog standard X.25 */ - .mode = DN_DEV_UCAST, - .state = DN_DEV_S_DS, - .t2 = 1, - .t3 = 120, - .name = "x25", - .timer3 = dn_send_ptp_hello, -}, -#endif -#if 0 -{ - .type = ARPHRD_PPP, /* DECnet over PPP */ - .mode = DN_DEV_BCAST, - .state = DN_DEV_S_RU, - .t2 = 1, - .t3 = 10, - .name = "ppp", - .timer3 = dn_send_brd_hello, -}, -#endif -{ - .type = ARPHRD_DDCMP, /* DECnet over DDCMP */ - .mode = DN_DEV_UCAST, - .state = DN_DEV_S_DS, - .t2 = 1, - .t3 = 120, - .name = "ddcmp", - .timer3 = dn_send_ptp_hello, -}, -{ - .type = ARPHRD_LOOPBACK, /* Loopback interface - always last */ - .mode = DN_DEV_BCAST, - .state = DN_DEV_S_RU, - .t2 = 1, - .t3 = 10, - .name = "loopback", - .timer3 = dn_send_brd_hello, -} -}; - -#define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list) - -#define DN_DEV_PARMS_OFFSET(x) offsetof(struct dn_dev_parms, x) - -#ifdef CONFIG_SYSCTL - -static int min_t2[] = { 1 }; -static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */ -static int min_t3[] = { 1 }; -static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MULT or T3MULT */ - -static int min_priority[1]; -static int max_priority[] = { 127 }; /* From DECnet spec */ - -static int dn_forwarding_proc(struct ctl_table *, int, void *, size_t *, - loff_t *); -static struct dn_dev_sysctl_table { - struct ctl_table_header *sysctl_header; - struct ctl_table dn_dev_vars[5]; -} dn_dev_sysctl = { - NULL, - { - { - .procname = "forwarding", - .data = (void *)DN_DEV_PARMS_OFFSET(forwarding), - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = dn_forwarding_proc, - }, - { - .procname = "priority", - .data = (void *)DN_DEV_PARMS_OFFSET(priority), - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_priority, - .extra2 = &max_priority - }, - { - .procname = "t2", - .data = (void *)DN_DEV_PARMS_OFFSET(t2), - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_t2, - .extra2 = &max_t2 - }, - { - .procname = "t3", - .data = (void *)DN_DEV_PARMS_OFFSET(t3), - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_t3, - .extra2 = &max_t3 - }, - { } - }, -}; - -static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms) -{ - struct dn_dev_sysctl_table *t; - int i; - - char path[sizeof("net/decnet/conf/") + IFNAMSIZ]; - - t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL); - 
if (t == NULL) - return; - - for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) { - long offset = (long)t->dn_dev_vars[i].data; - t->dn_dev_vars[i].data = ((char *)parms) + offset; - } - - snprintf(path, sizeof(path), "net/decnet/conf/%s", - dev? dev->name : parms->name); - - t->dn_dev_vars[0].extra1 = (void *)dev; - - t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars); - if (t->sysctl_header == NULL) - kfree(t); - else - parms->sysctl = t; -} - -static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) -{ - if (parms->sysctl) { - struct dn_dev_sysctl_table *t = parms->sysctl; - parms->sysctl = NULL; - unregister_net_sysctl_table(t->sysctl_header); - kfree(t); - } -} - -static int dn_forwarding_proc(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ -#ifdef CONFIG_DECNET_ROUTER - struct net_device *dev = table->extra1; - struct dn_dev *dn_db; - int err; - int tmp, old; - - if (table->extra1 == NULL) - return -EINVAL; - - dn_db = rcu_dereference_raw(dev->dn_ptr); - old = dn_db->parms.forwarding; - - err = proc_dointvec(table, write, buffer, lenp, ppos); - - if ((err >= 0) && write) { - if (dn_db->parms.forwarding < 0) - dn_db->parms.forwarding = 0; - if (dn_db->parms.forwarding > 2) - dn_db->parms.forwarding = 2; - /* - * What an ugly hack this is... its works, just. It - * would be nice if sysctl/proc were just that little - * bit more flexible so I don't have to write a special - * routine, or suffer hacks like this - SJW - */ - tmp = dn_db->parms.forwarding; - dn_db->parms.forwarding = old; - if (dn_db->parms.down) - dn_db->parms.down(dev); - dn_db->parms.forwarding = tmp; - if (dn_db->parms.up) - dn_db->parms.up(dev); - } - - return err; -#else - return -EINVAL; -#endif -} - -#else /* CONFIG_SYSCTL */ -static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) -{ -} -static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms) -{ -} - -#endif /* CONFIG_SYSCTL */ - -static inline __u16 mtu2blksize(struct net_device *dev) -{ - u32 blksize = dev->mtu; - if (blksize > 0xffff) - blksize = 0xffff; - - if (dev->type == ARPHRD_ETHER || - dev->type == ARPHRD_PPP || - dev->type == ARPHRD_IPGRE || - dev->type == ARPHRD_LOOPBACK) - blksize -= 2; - - return (__u16)blksize; -} - -static struct dn_ifaddr *dn_dev_alloc_ifa(void) -{ - struct dn_ifaddr *ifa; - - ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); - - return ifa; -} - -static void dn_dev_free_ifa(struct dn_ifaddr *ifa) -{ - kfree_rcu(ifa, rcu); -} - -static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy) -{ - struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap); - unsigned char mac_addr[6]; - struct net_device *dev = dn_db->dev; - - ASSERT_RTNL(); - - *ifap = ifa1->ifa_next; - - if (dn_db->dev->type == ARPHRD_ETHER) { - if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { - dn_dn2eth(mac_addr, ifa1->ifa_local); - dev_mc_del(dev, mac_addr); - } - } - - dn_ifaddr_notify(RTM_DELADDR, ifa1); - blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1); - if (destroy) { - dn_dev_free_ifa(ifa1); - - if (dn_db->ifa_list == NULL) - dn_dev_delete(dn_db->dev); - } -} - -static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) -{ - struct net_device *dev = dn_db->dev; - struct dn_ifaddr *ifa1; - unsigned char mac_addr[6]; - - ASSERT_RTNL(); - - /* Check for duplicates */ - for (ifa1 = rtnl_dereference(dn_db->ifa_list); - ifa1 != NULL; - ifa1 = rtnl_dereference(ifa1->ifa_next)) { - if (ifa1->ifa_local == 
ifa->ifa_local) - return -EEXIST; - } - - if (dev->type == ARPHRD_ETHER) { - if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { - dn_dn2eth(mac_addr, ifa->ifa_local); - dev_mc_add(dev, mac_addr); - } - } - - ifa->ifa_next = dn_db->ifa_list; - rcu_assign_pointer(dn_db->ifa_list, ifa); - - dn_ifaddr_notify(RTM_NEWADDR, ifa); - blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); - - return 0; -} - -static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa) -{ - struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); - int rv; - - if (dn_db == NULL) { - int err; - dn_db = dn_dev_create(dev, &err); - if (dn_db == NULL) - return err; - } - - ifa->ifa_dev = dn_db; - - if (dev->flags & IFF_LOOPBACK) - ifa->ifa_scope = RT_SCOPE_HOST; - - rv = dn_dev_insert_ifa(dn_db, ifa); - if (rv) - dn_dev_free_ifa(ifa); - return rv; -} - - -int dn_dev_ioctl(unsigned int cmd, void __user *arg) -{ - char buffer[DN_IFREQ_SIZE]; - struct ifreq *ifr = (struct ifreq *)buffer; - struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr; - struct dn_dev *dn_db; - struct net_device *dev; - struct dn_ifaddr *ifa = NULL; - struct dn_ifaddr __rcu **ifap = NULL; - int ret = 0; - - if (copy_from_user(ifr, arg, DN_IFREQ_SIZE)) - return -EFAULT; - ifr->ifr_name[IFNAMSIZ-1] = 0; - - dev_load(&init_net, ifr->ifr_name); - - switch (cmd) { - case SIOCGIFADDR: - break; - case SIOCSIFADDR: - if (!capable(CAP_NET_ADMIN)) - return -EACCES; - if (sdn->sdn_family != AF_DECnet) - return -EINVAL; - break; - default: - return -EINVAL; - } - - rtnl_lock(); - - if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) { - ret = -ENODEV; - goto done; - } - - if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) { - for (ifap = &dn_db->ifa_list; - (ifa = rtnl_dereference(*ifap)) != NULL; - ifap = &ifa->ifa_next) - if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0) - break; - } - - if (ifa == NULL && cmd != SIOCSIFADDR) { - ret = -EADDRNOTAVAIL; - goto done; - } - - switch (cmd) { - case SIOCGIFADDR: - *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local; - if (copy_to_user(arg, ifr, DN_IFREQ_SIZE)) - ret = -EFAULT; - break; - - case SIOCSIFADDR: - if (!ifa) { - if ((ifa = dn_dev_alloc_ifa()) == NULL) { - ret = -ENOBUFS; - break; - } - memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); - } else { - if (ifa->ifa_local == dn_saddr2dn(sdn)) - break; - dn_dev_del_ifa(dn_db, ifap, 0); - } - - ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn); - - ret = dn_dev_set_ifa(dev, ifa); - } -done: - rtnl_unlock(); - - return ret; -} - -struct net_device *dn_dev_get_default(void) -{ - struct net_device *dev; - - spin_lock(&dndev_lock); - dev = decnet_default_device; - if (dev) { - if (dev->dn_ptr) - dev_hold(dev); - else - dev = NULL; - } - spin_unlock(&dndev_lock); - - return dev; -} - -int dn_dev_set_default(struct net_device *dev, int force) -{ - struct net_device *old = NULL; - int rv = -EBUSY; - if (!dev->dn_ptr) - return -ENODEV; - - spin_lock(&dndev_lock); - if (force || decnet_default_device == NULL) { - old = decnet_default_device; - decnet_default_device = dev; - rv = 0; - } - spin_unlock(&dndev_lock); - - dev_put(old); - return rv; -} - -static void dn_dev_check_default(struct net_device *dev) -{ - spin_lock(&dndev_lock); - if (dev == decnet_default_device) { - decnet_default_device = NULL; - } else { - dev = NULL; - } - spin_unlock(&dndev_lock); - - dev_put(dev); -} - -/* - * Called with RTNL - */ -static struct dn_dev *dn_dev_by_index(int ifindex) -{ - struct net_device *dev; - struct dn_dev *dn_dev = NULL; - - dev = 
__dev_get_by_index(&init_net, ifindex); - if (dev) - dn_dev = rtnl_dereference(dev->dn_ptr); - - return dn_dev; -} - -static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = { - [IFA_ADDRESS] = { .type = NLA_U16 }, - [IFA_LOCAL] = { .type = NLA_U16 }, - [IFA_LABEL] = { .type = NLA_STRING, - .len = IFNAMSIZ - 1 }, - [IFA_FLAGS] = { .type = NLA_U32 }, -}; - -static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack) -{ - struct net *net = sock_net(skb->sk); - struct nlattr *tb[IFA_MAX+1]; - struct dn_dev *dn_db; - struct ifaddrmsg *ifm; - struct dn_ifaddr *ifa; - struct dn_ifaddr __rcu **ifap; - int err = -EINVAL; - - if (!netlink_capable(skb, CAP_NET_ADMIN)) - return -EPERM; - - if (!net_eq(net, &init_net)) - goto errout; - - err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, - dn_ifa_policy, extack); - if (err < 0) - goto errout; - - err = -ENODEV; - ifm = nlmsg_data(nlh); - if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL) - goto errout; - - err = -EADDRNOTAVAIL; - for (ifap = &dn_db->ifa_list; - (ifa = rtnl_dereference(*ifap)) != NULL; - ifap = &ifa->ifa_next) { - if (tb[IFA_LOCAL] && - nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) - continue; - - if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) - continue; - - dn_dev_del_ifa(dn_db, ifap, 1); - return 0; - } - -errout: - return err; -} - -static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack) -{ - struct net *net = sock_net(skb->sk); - struct nlattr *tb[IFA_MAX+1]; - struct net_device *dev; - struct dn_dev *dn_db; - struct ifaddrmsg *ifm; - struct dn_ifaddr *ifa; - int err; - - if (!netlink_capable(skb, CAP_NET_ADMIN)) - return -EPERM; - - if (!net_eq(net, &init_net)) - return -EINVAL; - - err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, - dn_ifa_policy, extack); - if (err < 0) - return err; - - if (tb[IFA_LOCAL] == NULL) - return -EINVAL; - - ifm = nlmsg_data(nlh); - if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL) - return -ENODEV; - - if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) { - dn_db = dn_dev_create(dev, &err); - if (!dn_db) - return err; - } - - if ((ifa = dn_dev_alloc_ifa()) == NULL) - return -ENOBUFS; - - if (tb[IFA_ADDRESS] == NULL) - tb[IFA_ADDRESS] = tb[IFA_LOCAL]; - - ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]); - ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]); - ifa->ifa_flags = tb[IFA_FLAGS] ? 
nla_get_u32(tb[IFA_FLAGS]) : - ifm->ifa_flags; - ifa->ifa_scope = ifm->ifa_scope; - ifa->ifa_dev = dn_db; - - if (tb[IFA_LABEL]) - nla_strscpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); - else - memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); - - err = dn_dev_insert_ifa(dn_db, ifa); - if (err) - dn_dev_free_ifa(ifa); - - return err; -} - -static inline size_t dn_ifaddr_nlmsg_size(void) -{ - return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) - + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ - + nla_total_size(2) /* IFA_ADDRESS */ - + nla_total_size(2) /* IFA_LOCAL */ - + nla_total_size(4); /* IFA_FLAGS */ -} - -static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, - u32 portid, u32 seq, int event, unsigned int flags) -{ - struct ifaddrmsg *ifm; - struct nlmsghdr *nlh; - u32 ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT; - - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); - if (nlh == NULL) - return -EMSGSIZE; - - ifm = nlmsg_data(nlh); - ifm->ifa_family = AF_DECnet; - ifm->ifa_prefixlen = 16; - ifm->ifa_flags = ifa_flags; - ifm->ifa_scope = ifa->ifa_scope; - ifm->ifa_index = ifa->ifa_dev->dev->ifindex; - - if ((ifa->ifa_address && - nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) || - (ifa->ifa_local && - nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) || - (ifa->ifa_label[0] && - nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) || - nla_put_u32(skb, IFA_FLAGS, ifa_flags)) - goto nla_put_failure; - nlmsg_end(skb, nlh); - return 0; - -nla_put_failure: - nlmsg_cancel(skb, nlh); - return -EMSGSIZE; -} - -static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa) -{ - struct sk_buff *skb; - int err = -ENOBUFS; - - skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL); - if (skb == NULL) - goto errout; - - err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0); - if (err < 0) { - /* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */ - WARN_ON(err == -EMSGSIZE); - kfree_skb(skb); - goto errout; - } - rtnl_notify(skb, &init_net, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL); - return; -errout: - if (err < 0) - rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_IFADDR, err); -} - -static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct net *net = sock_net(skb->sk); - int idx, dn_idx = 0, skip_ndevs, skip_naddr; - struct net_device *dev; - struct dn_dev *dn_db; - struct dn_ifaddr *ifa; - - if (!net_eq(net, &init_net)) - return 0; - - skip_ndevs = cb->args[0]; - skip_naddr = cb->args[1]; - - idx = 0; - rcu_read_lock(); - for_each_netdev_rcu(&init_net, dev) { - if (idx < skip_ndevs) - goto cont; - else if (idx > skip_ndevs) { - /* Only skip over addresses for first dev dumped - * in this iteration (idx == skip_ndevs) */ - skip_naddr = 0; - } - - if ((dn_db = rcu_dereference(dev->dn_ptr)) == NULL) - goto cont; - - for (ifa = rcu_dereference(dn_db->ifa_list), dn_idx = 0; ifa; - ifa = rcu_dereference(ifa->ifa_next), dn_idx++) { - if (dn_idx < skip_naddr) - continue; - - if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, RTM_NEWADDR, - NLM_F_MULTI) < 0) - goto done; - } -cont: - idx++; - } -done: - rcu_read_unlock(); - cb->args[0] = idx; - cb->args[1] = dn_idx; - - return skb->len; -} - -static int dn_dev_get_first(struct net_device *dev, __le16 *addr) -{ - struct dn_dev *dn_db; - struct dn_ifaddr *ifa; - int rv = -ENODEV; - - rcu_read_lock(); - dn_db = rcu_dereference(dev->dn_ptr); - if (dn_db == NULL) - goto out; - - ifa = rcu_dereference(dn_db->ifa_list); - if (ifa != NULL) { - *addr = ifa->ifa_local; - rv = 0; - } -out: - 
rcu_read_unlock(); - return rv; -} - -/* - * Find a default address to bind to. - * - * This is one of those areas where the initial VMS concepts don't really - * map onto the Linux concepts, and since we introduced multiple addresses - * per interface we have to cope with slightly odd ways of finding out what - * "our address" really is. Mostly it's not a problem; for this we just guess - * a sensible default. Eventually the routing code will take care of all the - * nasties for us I hope. - */ -int dn_dev_bind_default(__le16 *addr) -{ - struct net_device *dev; - int rv; - dev = dn_dev_get_default(); -last_chance: - if (dev) { - rv = dn_dev_get_first(dev, addr); - dev_put(dev); - if (rv == 0 || dev == init_net.loopback_dev) - return rv; - } - dev = init_net.loopback_dev; - dev_hold(dev); - goto last_chance; -} - -static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa) -{ - struct endnode_hello_message *msg; - struct sk_buff *skb = NULL; - __le16 *pktlen; - struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); - - if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL) - return; - - skb->dev = dev; - - msg = skb_put(skb, sizeof(*msg)); - - msg->msgflg = 0x0D; - memcpy(msg->tiver, dn_eco_version, 3); - dn_dn2eth(msg->id, ifa->ifa_local); - msg->iinfo = DN_RT_INFO_ENDN; - msg->blksize = cpu_to_le16(mtu2blksize(dev)); - msg->area = 0x00; - memset(msg->seed, 0, 8); - memcpy(msg->neighbor, dn_hiord, ETH_ALEN); - - if (dn_db->router) { - struct dn_neigh *dn = container_of(dn_db->router, struct dn_neigh, n); - dn_dn2eth(msg->neighbor, dn->addr); - } - - msg->timer = cpu_to_le16((unsigned short)dn_db->parms.t3); - msg->mpd = 0x00; - msg->datalen = 0x02; - memset(msg->data, 0xAA, 2); - - pktlen = skb_push(skb, 2); - *pktlen = cpu_to_le16(skb->len - 2); - - skb_reset_network_header(skb); - - dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id); -} - - -#define DRDELAY (5 * HZ) - -static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa) -{ - /* First check time since device went up */ - if (time_before(jiffies, dn_db->uptime + DRDELAY)) - return 0; - - /* If there is no router, then yes... */ - if (!dn_db->router) - return 1; - - /* otherwise only if we have a higher priority or.. */ - if (dn->priority < dn_db->parms.priority) - return 1; - - /* if we have equal priority and a higher node number */ - if (dn->priority != dn_db->parms.priority) - return 0; - - if (le16_to_cpu(dn->addr) < le16_to_cpu(ifa->ifa_local)) - return 1; - - return 0; -} - -static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa) -{ - int n; - struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); - struct dn_neigh *dn = container_of(dn_db->router, struct dn_neigh, n); - struct sk_buff *skb; - size_t size; - unsigned char *ptr; - unsigned char *i1, *i2; - __le16 *pktlen; - char *src; - - if (mtu2blksize(dev) < (26 + 7)) - return; - - n = mtu2blksize(dev) - 26; - n /= 7; - - if (n > 32) - n = 32; - - size = 2 + 26 + 7 * n; - - if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL) - return; - - skb->dev = dev; - ptr = skb_put(skb, size); - - *ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH; - *ptr++ = 2; /* ECO */ - *ptr++ = 0; - *ptr++ = 0; - dn_dn2eth(ptr, ifa->ifa_local); - src = ptr; - ptr += ETH_ALEN; - *ptr++ = dn_db->parms.forwarding == 1 ? 
- DN_RT_INFO_L1RT : DN_RT_INFO_L2RT; - *((__le16 *)ptr) = cpu_to_le16(mtu2blksize(dev)); - ptr += 2; - *ptr++ = dn_db->parms.priority; /* Priority */ - *ptr++ = 0; /* Area: Reserved */ - *((__le16 *)ptr) = cpu_to_le16((unsigned short)dn_db->parms.t3); - ptr += 2; - *ptr++ = 0; /* MPD: Reserved */ - i1 = ptr++; - memset(ptr, 0, 7); /* Name: Reserved */ - ptr += 7; - i2 = ptr++; - - n = dn_neigh_elist(dev, ptr, n); - - *i2 = 7 * n; - *i1 = 8 + *i2; - - skb_trim(skb, (27 + *i2)); - - pktlen = skb_push(skb, 2); - *pktlen = cpu_to_le16(skb->len - 2); - - skb_reset_network_header(skb); - - if (dn_am_i_a_router(dn, dn_db, ifa)) { - struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); - if (skb2) { - dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src); - } - } - - dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src); -} - -static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa) -{ - struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); - - if (dn_db->parms.forwarding == 0) - dn_send_endnode_hello(dev, ifa); - else - dn_send_router_hello(dev, ifa); -} - -static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa) -{ - int tdlen = 16; - int size = dev->hard_header_len + 2 + 4 + tdlen; - struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC); - int i; - unsigned char *ptr; - char src[ETH_ALEN]; - - if (skb == NULL) - return ; - - skb->dev = dev; - skb_push(skb, dev->hard_header_len); - ptr = skb_put(skb, 2 + 4 + tdlen); - - *ptr++ = DN_RT_PKT_HELO; - *((__le16 *)ptr) = ifa->ifa_local; - ptr += 2; - *ptr++ = tdlen; - - for(i = 0; i < tdlen; i++) - *ptr++ = 0252; - - dn_dn2eth(src, ifa->ifa_local); - dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src); -} - -static int dn_eth_up(struct net_device *dev) -{ - struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); - - if (dn_db->parms.forwarding == 0) - dev_mc_add(dev, dn_rt_all_end_mcast); - else - dev_mc_add(dev, dn_rt_all_rt_mcast); - - dn_db->use_long = 1; - - return 0; -} - -static void dn_eth_down(struct net_device *dev) -{ - struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); - - if (dn_db->parms.forwarding == 0) - dev_mc_del(dev, dn_rt_all_end_mcast); - else - dev_mc_del(dev, dn_rt_all_rt_mcast); -} - -static void dn_dev_set_timer(struct net_device *dev); - -static void dn_dev_timer_func(struct timer_list *t) -{ - struct dn_dev *dn_db = from_timer(dn_db, t, timer); - struct net_device *dev; - struct dn_ifaddr *ifa; - - rcu_read_lock(); - dev = dn_db->dev; - if (dn_db->t3 <= dn_db->parms.t2) { - if (dn_db->parms.timer3) { - for (ifa = rcu_dereference(dn_db->ifa_list); - ifa; - ifa = rcu_dereference(ifa->ifa_next)) { - if (!(ifa->ifa_flags & IFA_F_SECONDARY)) - dn_db->parms.timer3(dev, ifa); - } - } - dn_db->t3 = dn_db->parms.t3; - } else { - dn_db->t3 -= dn_db->parms.t2; - } - rcu_read_unlock(); - dn_dev_set_timer(dev); -} - -static void dn_dev_set_timer(struct net_device *dev) -{ - struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); - - if (dn_db->parms.t2 > dn_db->parms.t3) - dn_db->parms.t2 = dn_db->parms.t3; - - dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ); - - add_timer(&dn_db->timer); -} - -static struct dn_dev *dn_dev_create(struct net_device *dev, int *err) -{ - int i; - struct dn_dev_parms *p = dn_dev_list; - struct dn_dev *dn_db; - - for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) { - if (p->type == dev->type) - break; - } - - *err = -ENODEV; - if (i == DN_DEV_LIST_SIZE) - return NULL; - - *err = -ENOBUFS; - if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) - return 
NULL; - - memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); - - rcu_assign_pointer(dev->dn_ptr, dn_db); - dn_db->dev = dev; - timer_setup(&dn_db->timer, dn_dev_timer_func, 0); - - dn_db->uptime = jiffies; - - dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); - if (!dn_db->neigh_parms) { - RCU_INIT_POINTER(dev->dn_ptr, NULL); - kfree(dn_db); - return NULL; - } - - if (dn_db->parms.up) { - if (dn_db->parms.up(dev) < 0) { - neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); - dev->dn_ptr = NULL; - kfree(dn_db); - return NULL; - } - } - - dn_dev_sysctl_register(dev, &dn_db->parms); - - dn_dev_set_timer(dev); - - *err = 0; - return dn_db; -} - - -/* - * This processes a device up event. We only start up - * the loopback device & ethernet devices with correct - * MAC addresses automatically. Others must be started - * specifically. - * - * FIXME: How should we configure the loopback address ? If we could dispense - * with using decnet_address here and for autobind, it will be one less thing - * for users to worry about setting up. - */ - -void dn_dev_up(struct net_device *dev) -{ - struct dn_ifaddr *ifa; - __le16 addr = decnet_address; - int maybe_default = 0; - struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); - - if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) - return; - - /* - * Need to ensure that loopback device has a dn_db attached to it - * to allow creation of neighbours against it, even though it might - * not have a local address of its own. Might as well do the same for - * all autoconfigured interfaces. - */ - if (dn_db == NULL) { - int err; - dn_db = dn_dev_create(dev, &err); - if (dn_db == NULL) - return; - } - - if (dev->type == ARPHRD_ETHER) { - if (memcmp(dev->dev_addr, dn_hiord, 4) != 0) - return; - addr = dn_eth2dn(dev->dev_addr); - maybe_default = 1; - } - - if (addr == 0) - return; - - if ((ifa = dn_dev_alloc_ifa()) == NULL) - return; - - ifa->ifa_local = ifa->ifa_address = addr; - ifa->ifa_flags = 0; - ifa->ifa_scope = RT_SCOPE_UNIVERSE; - strcpy(ifa->ifa_label, dev->name); - - dn_dev_set_ifa(dev, ifa); - - /* - * Automagically set the default device to the first automatically - * configured ethernet card in the system. 
- */ - if (maybe_default) { - dev_hold(dev); - if (dn_dev_set_default(dev, 0)) - dev_put(dev); - } -} - -static void dn_dev_delete(struct net_device *dev) -{ - struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); - - if (dn_db == NULL) - return; - - del_timer_sync(&dn_db->timer); - dn_dev_sysctl_unregister(&dn_db->parms); - dn_dev_check_default(dev); - neigh_ifdown(&dn_neigh_table, dev); - - if (dn_db->parms.down) - dn_db->parms.down(dev); - - dev->dn_ptr = NULL; - - neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); - neigh_ifdown(&dn_neigh_table, dev); - - if (dn_db->router) - neigh_release(dn_db->router); - if (dn_db->peer) - neigh_release(dn_db->peer); - - kfree(dn_db); -} - -void dn_dev_down(struct net_device *dev) -{ - struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); - struct dn_ifaddr *ifa; - - if (dn_db == NULL) - return; - - while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) { - dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0); - dn_dev_free_ifa(ifa); - } - - dn_dev_delete(dev); -} - -void dn_dev_init_pkt(struct sk_buff *skb) -{ -} - -void dn_dev_veri_pkt(struct sk_buff *skb) -{ -} - -void dn_dev_hello(struct sk_buff *skb) -{ -} - -void dn_dev_devices_off(void) -{ - struct net_device *dev; - - rtnl_lock(); - for_each_netdev(&init_net, dev) - dn_dev_down(dev); - rtnl_unlock(); - -} - -void dn_dev_devices_on(void) -{ - struct net_device *dev; - - rtnl_lock(); - for_each_netdev(&init_net, dev) { - if (dev->flags & IFF_UP) - dn_dev_up(dev); - } - rtnl_unlock(); -} - -int register_dnaddr_notifier(struct notifier_block *nb) -{ - return blocking_notifier_chain_register(&dnaddr_chain, nb); -} - -int unregister_dnaddr_notifier(struct notifier_block *nb) -{ - return blocking_notifier_chain_unregister(&dnaddr_chain, nb); -} - -#ifdef CONFIG_PROC_FS -static inline int is_dn_dev(struct net_device *dev) -{ - return dev->dn_ptr != NULL; -} - -static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(RCU) -{ - int i; - struct net_device *dev; - - rcu_read_lock(); - - if (*pos == 0) - return SEQ_START_TOKEN; - - i = 1; - for_each_netdev_rcu(&init_net, dev) { - if (!is_dn_dev(dev)) - continue; - - if (i++ == *pos) - return dev; - } - - return NULL; -} - -static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - struct net_device *dev; - - ++*pos; - - dev = v; - if (v == SEQ_START_TOKEN) - dev = net_device_entry(&init_net.dev_base_head); - - for_each_netdev_continue_rcu(&init_net, dev) { - if (!is_dn_dev(dev)) - continue; - - return dev; - } - - return NULL; -} - -static void dn_dev_seq_stop(struct seq_file *seq, void *v) - __releases(RCU) -{ - rcu_read_unlock(); -} - -static char *dn_type2asc(char type) -{ - switch (type) { - case DN_DEV_BCAST: - return "B"; - case DN_DEV_UCAST: - return "U"; - case DN_DEV_MPOINT: - return "M"; - } - - return "?"; -} - -static int dn_dev_seq_show(struct seq_file *seq, void *v) -{ - if (v == SEQ_START_TOKEN) - seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n"); - else { - struct net_device *dev = v; - char peer_buf[DN_ASCBUF_LEN]; - char router_buf[DN_ASCBUF_LEN]; - struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr); - - seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" - " %04hu %03d %02x %-10s %-7s %-7s\n", - dev->name, - dn_type2asc(dn_db->parms.mode), - 0, 0, - dn_db->t3, dn_db->parms.t3, - mtu2blksize(dev), - dn_db->parms.priority, - dn_db->parms.state, dn_db->parms.name, - dn_db->router ? 
dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->router->primary_key), router_buf) : "", - dn_db->peer ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->peer->primary_key), peer_buf) : ""); - } - return 0; -} - -static const struct seq_operations dn_dev_seq_ops = { - .start = dn_dev_seq_start, - .next = dn_dev_seq_next, - .stop = dn_dev_seq_stop, - .show = dn_dev_seq_show, -}; -#endif /* CONFIG_PROC_FS */ - -static int addr[2]; -module_param_array(addr, int, NULL, 0444); -MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node"); - -void __init dn_dev_init(void) -{ - if (addr[0] > 63 || addr[0] < 0) { - printk(KERN_ERR "DECnet: Area must be between 0 and 63"); - return; - } - - if (addr[1] > 1023 || addr[1] < 0) { - printk(KERN_ERR "DECnet: Node must be between 0 and 1023"); - return; - } - - decnet_address = cpu_to_le16((addr[0] << 10) | addr[1]); - - dn_dev_devices_on(); - - rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWADDR, - dn_nl_newaddr, NULL, 0); - rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELADDR, - dn_nl_deladdr, NULL, 0); - rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETADDR, - NULL, dn_nl_dump_ifaddr, 0); - - proc_create_seq("decnet_dev", 0444, init_net.proc_net, &dn_dev_seq_ops); - -#ifdef CONFIG_SYSCTL - { - int i; - for(i = 0; i < DN_DEV_LIST_SIZE; i++) - dn_dev_sysctl_register(NULL, &dn_dev_list[i]); - } -#endif /* CONFIG_SYSCTL */ -} - -void __exit dn_dev_cleanup(void) -{ -#ifdef CONFIG_SYSCTL - { - int i; - for(i = 0; i < DN_DEV_LIST_SIZE; i++) - dn_dev_sysctl_unregister(&dn_dev_list[i]); - } -#endif /* CONFIG_SYSCTL */ - - remove_proc_entry("decnet_dev", init_net.proc_net); - - dn_dev_devices_off(); -} diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c deleted file mode 100644 index 269c029ad74f..000000000000 --- a/net/decnet/dn_fib.c +++ /dev/null @@ -1,798 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * DECnet Routing Forwarding Information Base (Glue/Info List) - * - * Author: Steve Whitehouse <SteveW@ACM.org> - * - * - * Changes: - * Alexey Kuznetsov : SMP locking changes - * Steve Whitehouse : Rewrote it... Well to be more correct, I - * copied most of it from the ipv4 fib code. - * Steve Whitehouse : Updated it in style and fixed a few bugs - * which were fixed in the ipv4 code since - * this code was copied from it. 
- * - */ -#include <linux/string.h> -#include <linux/net.h> -#include <linux/socket.h> -#include <linux/slab.h> -#include <linux/sockios.h> -#include <linux/init.h> -#include <linux/skbuff.h> -#include <linux/netlink.h> -#include <linux/rtnetlink.h> -#include <linux/proc_fs.h> -#include <linux/netdevice.h> -#include <linux/timer.h> -#include <linux/spinlock.h> -#include <linux/atomic.h> -#include <linux/uaccess.h> -#include <net/neighbour.h> -#include <net/dst.h> -#include <net/flow.h> -#include <net/fib_rules.h> -#include <net/dn.h> -#include <net/dn_route.h> -#include <net/dn_fib.h> -#include <net/dn_neigh.h> -#include <net/dn_dev.h> -#include <net/rtnh.h> - -#define RT_MIN_TABLE 1 - -#define for_fib_info() { struct dn_fib_info *fi;\ - for(fi = dn_fib_info_list; fi; fi = fi->fib_next) -#define endfor_fib_info() } - -#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\ - for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) - -#define change_nexthops(fi) { int nhsel; struct dn_fib_nh *nh;\ - for(nhsel = 0, nh = (struct dn_fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) - -#define endfor_nexthops(fi) } - -static DEFINE_SPINLOCK(dn_fib_multipath_lock); -static struct dn_fib_info *dn_fib_info_list; -static DEFINE_SPINLOCK(dn_fib_info_lock); - -static struct -{ - int error; - u8 scope; -} dn_fib_props[RTN_MAX+1] = { - [RTN_UNSPEC] = { .error = 0, .scope = RT_SCOPE_NOWHERE }, - [RTN_UNICAST] = { .error = 0, .scope = RT_SCOPE_UNIVERSE }, - [RTN_LOCAL] = { .error = 0, .scope = RT_SCOPE_HOST }, - [RTN_BROADCAST] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, - [RTN_ANYCAST] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, - [RTN_MULTICAST] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, - [RTN_BLACKHOLE] = { .error = -EINVAL, .scope = RT_SCOPE_UNIVERSE }, - [RTN_UNREACHABLE] = { .error = -EHOSTUNREACH, .scope = RT_SCOPE_UNIVERSE }, - [RTN_PROHIBIT] = { .error = -EACCES, .scope = RT_SCOPE_UNIVERSE }, - [RTN_THROW] = { .error = -EAGAIN, .scope = RT_SCOPE_UNIVERSE }, - [RTN_NAT] = { .error = 0, .scope = RT_SCOPE_NOWHERE }, - [RTN_XRESOLVE] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, -}; - -static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force); -static int dn_fib_sync_up(struct net_device *dev); - -void dn_fib_free_info(struct dn_fib_info *fi) -{ - if (fi->fib_dead == 0) { - printk(KERN_DEBUG "DECnet: BUG! 
Attempt to free alive dn_fib_info\n"); - return; - } - - change_nexthops(fi) { - dev_put(nh->nh_dev); - nh->nh_dev = NULL; - } endfor_nexthops(fi); - kfree(fi); -} - -void dn_fib_release_info(struct dn_fib_info *fi) -{ - spin_lock(&dn_fib_info_lock); - if (fi && refcount_dec_and_test(&fi->fib_treeref)) { - if (fi->fib_next) - fi->fib_next->fib_prev = fi->fib_prev; - if (fi->fib_prev) - fi->fib_prev->fib_next = fi->fib_next; - if (fi == dn_fib_info_list) - dn_fib_info_list = fi->fib_next; - fi->fib_dead = 1; - dn_fib_info_put(fi); - } - spin_unlock(&dn_fib_info_lock); -} - -static inline int dn_fib_nh_comp(const struct dn_fib_info *fi, const struct dn_fib_info *ofi) -{ - const struct dn_fib_nh *onh = ofi->fib_nh; - - for_nexthops(fi) { - if (nh->nh_oif != onh->nh_oif || - nh->nh_gw != onh->nh_gw || - nh->nh_scope != onh->nh_scope || - nh->nh_weight != onh->nh_weight || - ((nh->nh_flags^onh->nh_flags)&~RTNH_F_DEAD)) - return -1; - onh++; - } endfor_nexthops(fi); - return 0; -} - -static inline struct dn_fib_info *dn_fib_find_info(const struct dn_fib_info *nfi) -{ - for_fib_info() { - if (fi->fib_nhs != nfi->fib_nhs) - continue; - if (nfi->fib_protocol == fi->fib_protocol && - nfi->fib_prefsrc == fi->fib_prefsrc && - nfi->fib_priority == fi->fib_priority && - memcmp(nfi->fib_metrics, fi->fib_metrics, sizeof(fi->fib_metrics)) == 0 && - ((nfi->fib_flags^fi->fib_flags)&~RTNH_F_DEAD) == 0 && - (nfi->fib_nhs == 0 || dn_fib_nh_comp(fi, nfi) == 0)) - return fi; - } endfor_fib_info(); - return NULL; -} - -static int dn_fib_count_nhs(const struct nlattr *attr) -{ - struct rtnexthop *nhp = nla_data(attr); - int nhs = 0, nhlen = nla_len(attr); - - while (rtnh_ok(nhp, nhlen)) { - nhs++; - nhp = rtnh_next(nhp, &nhlen); - } - - /* leftover implies invalid nexthop configuration, discard it */ - return nhlen > 0 ? 0 : nhs; -} - -static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr, - const struct rtmsg *r) -{ - struct rtnexthop *nhp = nla_data(attr); - int nhlen = nla_len(attr); - - change_nexthops(fi) { - int attrlen; - - if (!rtnh_ok(nhp, nhlen)) - return -EINVAL; - - nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags; - nh->nh_oif = nhp->rtnh_ifindex; - nh->nh_weight = nhp->rtnh_hops + 1; - - attrlen = rtnh_attrlen(nhp); - if (attrlen > 0) { - struct nlattr *gw_attr; - - gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY); - nh->nh_gw = gw_attr ? 
nla_get_le16(gw_attr) : 0; - } - - nhp = rtnh_next(nhp, &nhlen); - } endfor_nexthops(fi); - - return 0; -} - - -static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct dn_fib_nh *nh) -{ - int err; - - if (nh->nh_gw) { - struct flowidn fld; - struct dn_fib_res res; - - if (nh->nh_flags&RTNH_F_ONLINK) { - struct net_device *dev; - - if (r->rtm_scope >= RT_SCOPE_LINK) - return -EINVAL; - if (dnet_addr_type(nh->nh_gw) != RTN_UNICAST) - return -EINVAL; - if ((dev = __dev_get_by_index(&init_net, nh->nh_oif)) == NULL) - return -ENODEV; - if (!(dev->flags&IFF_UP)) - return -ENETDOWN; - nh->nh_dev = dev; - dev_hold(dev); - nh->nh_scope = RT_SCOPE_LINK; - return 0; - } - - memset(&fld, 0, sizeof(fld)); - fld.daddr = nh->nh_gw; - fld.flowidn_oif = nh->nh_oif; - fld.flowidn_scope = r->rtm_scope + 1; - - if (fld.flowidn_scope < RT_SCOPE_LINK) - fld.flowidn_scope = RT_SCOPE_LINK; - - if ((err = dn_fib_lookup(&fld, &res)) != 0) - return err; - - err = -EINVAL; - if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) - goto out; - nh->nh_scope = res.scope; - nh->nh_oif = DN_FIB_RES_OIF(res); - nh->nh_dev = DN_FIB_RES_DEV(res); - if (nh->nh_dev == NULL) - goto out; - dev_hold(nh->nh_dev); - err = -ENETDOWN; - if (!(nh->nh_dev->flags & IFF_UP)) - goto out; - err = 0; -out: - dn_fib_res_put(&res); - return err; - } else { - struct net_device *dev; - - if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK)) - return -EINVAL; - - dev = __dev_get_by_index(&init_net, nh->nh_oif); - if (dev == NULL || dev->dn_ptr == NULL) - return -ENODEV; - if (!(dev->flags&IFF_UP)) - return -ENETDOWN; - nh->nh_dev = dev; - dev_hold(nh->nh_dev); - nh->nh_scope = RT_SCOPE_HOST; - } - - return 0; -} - - -struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct nlattr *attrs[], - const struct nlmsghdr *nlh, int *errp) -{ - int err; - struct dn_fib_info *fi = NULL; - struct dn_fib_info *ofi; - int nhs = 1; - - if (r->rtm_type > RTN_MAX) - goto err_inval; - - if (dn_fib_props[r->rtm_type].scope > r->rtm_scope) - goto err_inval; - - if (attrs[RTA_MULTIPATH] && - (nhs = dn_fib_count_nhs(attrs[RTA_MULTIPATH])) == 0) - goto err_inval; - - fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL); - err = -ENOBUFS; - if (fi == NULL) - goto failure; - - fi->fib_protocol = r->rtm_protocol; - fi->fib_nhs = nhs; - fi->fib_flags = r->rtm_flags; - - if (attrs[RTA_PRIORITY]) - fi->fib_priority = nla_get_u32(attrs[RTA_PRIORITY]); - - if (attrs[RTA_METRICS]) { - struct nlattr *attr; - int rem; - - nla_for_each_nested(attr, attrs[RTA_METRICS], rem) { - int type = nla_type(attr); - - if (type) { - if (type > RTAX_MAX || type == RTAX_CC_ALGO || - nla_len(attr) < 4) - goto err_inval; - - fi->fib_metrics[type-1] = nla_get_u32(attr); - } - } - } - - if (attrs[RTA_PREFSRC]) - fi->fib_prefsrc = nla_get_le16(attrs[RTA_PREFSRC]); - - if (attrs[RTA_MULTIPATH]) { - if ((err = dn_fib_get_nhs(fi, attrs[RTA_MULTIPATH], r)) != 0) - goto failure; - - if (attrs[RTA_OIF] && - fi->fib_nh->nh_oif != nla_get_u32(attrs[RTA_OIF])) - goto err_inval; - - if (attrs[RTA_GATEWAY] && - fi->fib_nh->nh_gw != nla_get_le16(attrs[RTA_GATEWAY])) - goto err_inval; - } else { - struct dn_fib_nh *nh = fi->fib_nh; - - if (attrs[RTA_OIF]) - nh->nh_oif = nla_get_u32(attrs[RTA_OIF]); - - if (attrs[RTA_GATEWAY]) - nh->nh_gw = nla_get_le16(attrs[RTA_GATEWAY]); - - nh->nh_flags = r->rtm_flags; - nh->nh_weight = 1; - } - - if (r->rtm_type == RTN_NAT) { - if (!attrs[RTA_GATEWAY] || nhs != 1 || attrs[RTA_OIF]) - goto err_inval; - - fi->fib_nh->nh_gw = 
nla_get_le16(attrs[RTA_GATEWAY]); - goto link_it; - } - - if (dn_fib_props[r->rtm_type].error) { - if (attrs[RTA_GATEWAY] || attrs[RTA_OIF] || attrs[RTA_MULTIPATH]) - goto err_inval; - - goto link_it; - } - - if (r->rtm_scope > RT_SCOPE_HOST) - goto err_inval; - - if (r->rtm_scope == RT_SCOPE_HOST) { - struct dn_fib_nh *nh = fi->fib_nh; - - /* Local address is added */ - if (nhs != 1 || nh->nh_gw) - goto err_inval; - nh->nh_scope = RT_SCOPE_NOWHERE; - nh->nh_dev = dev_get_by_index(&init_net, fi->fib_nh->nh_oif); - err = -ENODEV; - if (nh->nh_dev == NULL) - goto failure; - } else { - change_nexthops(fi) { - if ((err = dn_fib_check_nh(r, fi, nh)) != 0) - goto failure; - } endfor_nexthops(fi) - } - - if (fi->fib_prefsrc) { - if (r->rtm_type != RTN_LOCAL || !attrs[RTA_DST] || - fi->fib_prefsrc != nla_get_le16(attrs[RTA_DST])) - if (dnet_addr_type(fi->fib_prefsrc) != RTN_LOCAL) - goto err_inval; - } - -link_it: - if ((ofi = dn_fib_find_info(fi)) != NULL) { - fi->fib_dead = 1; - dn_fib_free_info(fi); - refcount_inc(&ofi->fib_treeref); - return ofi; - } - - refcount_set(&fi->fib_treeref, 1); - refcount_set(&fi->fib_clntref, 1); - spin_lock(&dn_fib_info_lock); - fi->fib_next = dn_fib_info_list; - fi->fib_prev = NULL; - if (dn_fib_info_list) - dn_fib_info_list->fib_prev = fi; - dn_fib_info_list = fi; - spin_unlock(&dn_fib_info_lock); - return fi; - -err_inval: - err = -EINVAL; - -failure: - *errp = err; - if (fi) { - fi->fib_dead = 1; - dn_fib_free_info(fi); - } - - return NULL; -} - -int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn *fld, struct dn_fib_res *res) -{ - int err = dn_fib_props[type].error; - - if (err == 0) { - if (fi->fib_flags & RTNH_F_DEAD) - return 1; - - res->fi = fi; - - switch (type) { - case RTN_NAT: - DN_FIB_RES_RESET(*res); - refcount_inc(&fi->fib_clntref); - return 0; - case RTN_UNICAST: - case RTN_LOCAL: - for_nexthops(fi) { - if (nh->nh_flags & RTNH_F_DEAD) - continue; - if (!fld->flowidn_oif || - fld->flowidn_oif == nh->nh_oif) - break; - } - if (nhsel < fi->fib_nhs) { - res->nh_sel = nhsel; - refcount_inc(&fi->fib_clntref); - return 0; - } - endfor_nexthops(fi); - res->fi = NULL; - return 1; - default: - net_err_ratelimited("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", - type); - res->fi = NULL; - return -EINVAL; - } - } - return err; -} - -void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res) -{ - struct dn_fib_info *fi = res->fi; - int w; - - spin_lock_bh(&dn_fib_multipath_lock); - if (fi->fib_power <= 0) { - int power = 0; - change_nexthops(fi) { - if (!(nh->nh_flags&RTNH_F_DEAD)) { - power += nh->nh_weight; - nh->nh_power = nh->nh_weight; - } - } endfor_nexthops(fi); - fi->fib_power = power; - if (power < 0) { - spin_unlock_bh(&dn_fib_multipath_lock); - res->nh_sel = 0; - return; - } - } - - w = jiffies % fi->fib_power; - - change_nexthops(fi) { - if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) { - if ((w -= nh->nh_power) <= 0) { - nh->nh_power--; - fi->fib_power--; - res->nh_sel = nhsel; - spin_unlock_bh(&dn_fib_multipath_lock); - return; - } - } - } endfor_nexthops(fi); - res->nh_sel = 0; - spin_unlock_bh(&dn_fib_multipath_lock); -} - -static inline u32 rtm_get_table(struct nlattr *attrs[], u8 table) -{ - if (attrs[RTA_TABLE]) - table = nla_get_u32(attrs[RTA_TABLE]); - - return table; -} - -static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack) -{ - struct net *net = sock_net(skb->sk); - struct dn_fib_table *tb; - struct rtmsg *r = 
nlmsg_data(nlh); - struct nlattr *attrs[RTA_MAX+1]; - int err; - - if (!netlink_capable(skb, CAP_NET_ADMIN)) - return -EPERM; - - if (!net_eq(net, &init_net)) - return -EINVAL; - - err = nlmsg_parse_deprecated(nlh, sizeof(*r), attrs, RTA_MAX, - rtm_dn_policy, extack); - if (err < 0) - return err; - - tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 0); - if (!tb) - return -ESRCH; - - return tb->delete(tb, r, attrs, nlh, &NETLINK_CB(skb)); -} - -static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack) -{ - struct net *net = sock_net(skb->sk); - struct dn_fib_table *tb; - struct rtmsg *r = nlmsg_data(nlh); - struct nlattr *attrs[RTA_MAX+1]; - int err; - - if (!netlink_capable(skb, CAP_NET_ADMIN)) - return -EPERM; - - if (!net_eq(net, &init_net)) - return -EINVAL; - - err = nlmsg_parse_deprecated(nlh, sizeof(*r), attrs, RTA_MAX, - rtm_dn_policy, extack); - if (err < 0) - return err; - - tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 1); - if (!tb) - return -ENOBUFS; - - return tb->insert(tb, r, attrs, nlh, &NETLINK_CB(skb)); -} - -static void fib_magic(int cmd, int type, __le16 dst, int dst_len, struct dn_ifaddr *ifa) -{ - struct dn_fib_table *tb; - struct { - struct nlmsghdr nlh; - struct rtmsg rtm; - } req; - struct { - struct nlattr hdr; - __le16 dst; - } dst_attr = { - .dst = dst, - }; - struct { - struct nlattr hdr; - __le16 prefsrc; - } prefsrc_attr = { - .prefsrc = ifa->ifa_local, - }; - struct { - struct nlattr hdr; - u32 oif; - } oif_attr = { - .oif = ifa->ifa_dev->dev->ifindex, - }; - struct nlattr *attrs[RTA_MAX+1] = { - [RTA_DST] = (struct nlattr *) &dst_attr, - [RTA_PREFSRC] = (struct nlattr * ) &prefsrc_attr, - [RTA_OIF] = (struct nlattr *) &oif_attr, - }; - - memset(&req.rtm, 0, sizeof(req.rtm)); - - if (type == RTN_UNICAST) - tb = dn_fib_get_table(RT_MIN_TABLE, 1); - else - tb = dn_fib_get_table(RT_TABLE_LOCAL, 1); - - if (tb == NULL) - return; - - req.nlh.nlmsg_len = sizeof(req); - req.nlh.nlmsg_type = cmd; - req.nlh.nlmsg_flags = NLM_F_REQUEST|NLM_F_CREATE|NLM_F_APPEND; - req.nlh.nlmsg_pid = 0; - req.nlh.nlmsg_seq = 0; - - req.rtm.rtm_dst_len = dst_len; - req.rtm.rtm_table = tb->n; - req.rtm.rtm_protocol = RTPROT_KERNEL; - req.rtm.rtm_scope = (type != RTN_LOCAL ? 
RT_SCOPE_LINK : RT_SCOPE_HOST); - req.rtm.rtm_type = type; - - if (cmd == RTM_NEWROUTE) - tb->insert(tb, &req.rtm, attrs, &req.nlh, NULL); - else - tb->delete(tb, &req.rtm, attrs, &req.nlh, NULL); -} - -static void dn_fib_add_ifaddr(struct dn_ifaddr *ifa) -{ - - fib_magic(RTM_NEWROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa); - -#if 0 - if (!(dev->flags&IFF_UP)) - return; - /* In the future, we will want to add default routes here */ - -#endif -} - -static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa) -{ - int found_it = 0; - struct net_device *dev; - struct dn_dev *dn_db; - struct dn_ifaddr *ifa2; - - ASSERT_RTNL(); - - /* Scan device list */ - rcu_read_lock(); - for_each_netdev_rcu(&init_net, dev) { - dn_db = rcu_dereference(dev->dn_ptr); - if (dn_db == NULL) - continue; - for (ifa2 = rcu_dereference(dn_db->ifa_list); - ifa2 != NULL; - ifa2 = rcu_dereference(ifa2->ifa_next)) { - if (ifa2->ifa_local == ifa->ifa_local) { - found_it = 1; - break; - } - } - } - rcu_read_unlock(); - - if (found_it == 0) { - fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa); - - if (dnet_addr_type(ifa->ifa_local) != RTN_LOCAL) { - if (dn_fib_sync_down(ifa->ifa_local, NULL, 0)) - dn_fib_flush(); - } - } -} - -static void dn_fib_disable_addr(struct net_device *dev, int force) -{ - if (dn_fib_sync_down(0, dev, force)) - dn_fib_flush(); - dn_rt_cache_flush(0); - neigh_ifdown(&dn_neigh_table, dev); -} - -static int dn_fib_dnaddr_event(struct notifier_block *this, unsigned long event, void *ptr) -{ - struct dn_ifaddr *ifa = (struct dn_ifaddr *)ptr; - - switch (event) { - case NETDEV_UP: - dn_fib_add_ifaddr(ifa); - dn_fib_sync_up(ifa->ifa_dev->dev); - dn_rt_cache_flush(-1); - break; - case NETDEV_DOWN: - dn_fib_del_ifaddr(ifa); - if (ifa->ifa_dev && ifa->ifa_dev->ifa_list == NULL) { - dn_fib_disable_addr(ifa->ifa_dev->dev, 1); - } else { - dn_rt_cache_flush(-1); - } - break; - } - return NOTIFY_DONE; -} - -static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force) -{ - int ret = 0; - int scope = RT_SCOPE_NOWHERE; - - if (force) - scope = -1; - - for_fib_info() { - /* - * This makes no sense for DECnet.... we will almost - * certainly have more than one local address the same - * over all our interfaces. It needs thinking about - * some more. 
- */ - if (local && fi->fib_prefsrc == local) { - fi->fib_flags |= RTNH_F_DEAD; - ret++; - } else if (dev && fi->fib_nhs) { - int dead = 0; - - change_nexthops(fi) { - if (nh->nh_flags&RTNH_F_DEAD) - dead++; - else if (nh->nh_dev == dev && - nh->nh_scope != scope) { - spin_lock_bh(&dn_fib_multipath_lock); - nh->nh_flags |= RTNH_F_DEAD; - fi->fib_power -= nh->nh_power; - nh->nh_power = 0; - spin_unlock_bh(&dn_fib_multipath_lock); - dead++; - } - } endfor_nexthops(fi) - if (dead == fi->fib_nhs) { - fi->fib_flags |= RTNH_F_DEAD; - ret++; - } - } - } endfor_fib_info(); - return ret; -} - - -static int dn_fib_sync_up(struct net_device *dev) -{ - int ret = 0; - - if (!(dev->flags&IFF_UP)) - return 0; - - for_fib_info() { - int alive = 0; - - change_nexthops(fi) { - if (!(nh->nh_flags&RTNH_F_DEAD)) { - alive++; - continue; - } - if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) - continue; - if (nh->nh_dev != dev || dev->dn_ptr == NULL) - continue; - alive++; - spin_lock_bh(&dn_fib_multipath_lock); - nh->nh_power = 0; - nh->nh_flags &= ~RTNH_F_DEAD; - spin_unlock_bh(&dn_fib_multipath_lock); - } endfor_nexthops(fi); - - if (alive > 0) { - fi->fib_flags &= ~RTNH_F_DEAD; - ret++; - } - } endfor_fib_info(); - return ret; -} - -static struct notifier_block dn_fib_dnaddr_notifier = { - .notifier_call = dn_fib_dnaddr_event, -}; - -void __exit dn_fib_cleanup(void) -{ - dn_fib_table_cleanup(); - dn_fib_rules_cleanup(); - - unregister_dnaddr_notifier(&dn_fib_dnaddr_notifier); -} - - -void __init dn_fib_init(void) -{ - dn_fib_table_init(); - dn_fib_rules_init(); - - register_dnaddr_notifier(&dn_fib_dnaddr_notifier); - - rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWROUTE, - dn_fib_rtm_newroute, NULL, 0); - rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELROUTE, - dn_fib_rtm_delroute, NULL, 0); -} diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c deleted file mode 100644 index 7c569bcc0aca..000000000000 --- a/net/decnet/dn_neigh.c +++ /dev/null @@ -1,607 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * DECnet Neighbour Functions (Adjacency Database and - * On-Ethernet Cache) - * - * Author: Steve Whitehouse <SteveW@ACM.org> - * - * - * Changes: - * Steve Whitehouse : Fixed router listing routine - * Steve Whitehouse : Added error_report functions - * Steve Whitehouse : Added default router detection - * Steve Whitehouse : Hop counts in outgoing messages - * Steve Whitehouse : Fixed src/dst in outgoing messages so - * forwarding now stands a good chance of - * working. - * Steve Whitehouse : Fixed neighbour states (for now anyway). - * Steve Whitehouse : Made error_report functions dummies. This - * is not the right place to return skbs. 
- * Steve Whitehouse : Convert to seq_file - * - */ - -#include <linux/net.h> -#include <linux/module.h> -#include <linux/socket.h> -#include <linux/if_arp.h> -#include <linux/slab.h> -#include <linux/if_ether.h> -#include <linux/init.h> -#include <linux/proc_fs.h> -#include <linux/string.h> -#include <linux/netfilter_decnet.h> -#include <linux/spinlock.h> -#include <linux/seq_file.h> -#include <linux/rcupdate.h> -#include <linux/jhash.h> -#include <linux/atomic.h> -#include <net/net_namespace.h> -#include <net/neighbour.h> -#include <net/dst.h> -#include <net/flow.h> -#include <net/dn.h> -#include <net/dn_dev.h> -#include <net/dn_neigh.h> -#include <net/dn_route.h> - -static int dn_neigh_construct(struct neighbour *); -static void dn_neigh_error_report(struct neighbour *, struct sk_buff *); -static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb); - -/* - * Operations for adding the link layer header. - */ -static const struct neigh_ops dn_neigh_ops = { - .family = AF_DECnet, - .error_report = dn_neigh_error_report, - .output = dn_neigh_output, - .connected_output = dn_neigh_output, -}; - -static u32 dn_neigh_hash(const void *pkey, - const struct net_device *dev, - __u32 *hash_rnd) -{ - return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]); -} - -static bool dn_key_eq(const struct neighbour *neigh, const void *pkey) -{ - return neigh_key_eq16(neigh, pkey); -} - -struct neigh_table dn_neigh_table = { - .family = PF_DECnet, - .entry_size = NEIGH_ENTRY_SIZE(sizeof(struct dn_neigh)), - .key_len = sizeof(__le16), - .protocol = cpu_to_be16(ETH_P_DNA_RT), - .hash = dn_neigh_hash, - .key_eq = dn_key_eq, - .constructor = dn_neigh_construct, - .id = "dn_neigh_cache", - .parms ={ - .tbl = &dn_neigh_table, - .reachable_time = 30 * HZ, - .data = { - [NEIGH_VAR_MCAST_PROBES] = 0, - [NEIGH_VAR_UCAST_PROBES] = 0, - [NEIGH_VAR_APP_PROBES] = 0, - [NEIGH_VAR_RETRANS_TIME] = 1 * HZ, - [NEIGH_VAR_BASE_REACHABLE_TIME] = 30 * HZ, - [NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ, - [NEIGH_VAR_INTERVAL_PROBE_TIME_MS] = 5 * HZ, - [NEIGH_VAR_GC_STALETIME] = 60 * HZ, - [NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_MAX, - [NEIGH_VAR_PROXY_QLEN] = 0, - [NEIGH_VAR_ANYCAST_DELAY] = 0, - [NEIGH_VAR_PROXY_DELAY] = 0, - [NEIGH_VAR_LOCKTIME] = 1 * HZ, - }, - }, - .gc_interval = 30 * HZ, - .gc_thresh1 = 128, - .gc_thresh2 = 512, - .gc_thresh3 = 1024, -}; - -static int dn_neigh_construct(struct neighbour *neigh) -{ - struct net_device *dev = neigh->dev; - struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n); - struct dn_dev *dn_db; - struct neigh_parms *parms; - - rcu_read_lock(); - dn_db = rcu_dereference(dev->dn_ptr); - if (dn_db == NULL) { - rcu_read_unlock(); - return -EINVAL; - } - - parms = dn_db->neigh_parms; - if (!parms) { - rcu_read_unlock(); - return -EINVAL; - } - - __neigh_parms_put(neigh->parms); - neigh->parms = neigh_parms_clone(parms); - rcu_read_unlock(); - - neigh->ops = &dn_neigh_ops; - neigh->nud_state = NUD_NOARP; - neigh->output = neigh->ops->connected_output; - - if ((dev->type == ARPHRD_IPGRE) || (dev->flags & IFF_POINTOPOINT)) - memcpy(neigh->ha, dev->broadcast, dev->addr_len); - else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK)) - dn_dn2eth(neigh->ha, dn->addr); - else { - net_dbg_ratelimited("Trying to create neigh for hw %d\n", - dev->type); - return -EINVAL; - } - - /* - * Make an estimate of the remote block size by assuming that its - * two less then the device mtu, which it true for ethernet (and - * other things which support long format headers) since there is - * 
an extra length field (of 16 bits) which isn't part of the - * ethernet headers and which the DECnet specs won't admit is part - * of the DECnet routing headers either. - * - * If we over estimate here its no big deal, the NSP negotiations - * will prevent us from sending packets which are too large for the - * remote node to handle. In any case this figure is normally updated - * by a hello message in most cases. - */ - dn->blksize = dev->mtu - 2; - - return 0; -} - -static void dn_neigh_error_report(struct neighbour *neigh, struct sk_buff *skb) -{ - printk(KERN_DEBUG "dn_neigh_error_report: called\n"); - kfree_skb(skb); -} - -static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - struct dn_route *rt = (struct dn_route *)dst; - struct net_device *dev = neigh->dev; - char mac_addr[ETH_ALEN]; - unsigned int seq; - int err; - - dn_dn2eth(mac_addr, rt->rt_local_src); - do { - seq = read_seqbegin(&neigh->ha_lock); - err = dev_hard_header(skb, dev, ntohs(skb->protocol), - neigh->ha, mac_addr, skb->len); - } while (read_seqretry(&neigh->ha_lock, seq)); - - if (err >= 0) - err = dev_queue_xmit(skb); - else { - kfree_skb(skb); - err = -EINVAL; - } - return err; -} - -static int dn_neigh_output_packet(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - struct dn_route *rt = (struct dn_route *)dst; - struct neighbour *neigh = rt->n; - - return neigh->output(neigh, skb); -} - -/* - * For talking to broadcast devices: Ethernet & PPP - */ -static int dn_long_output(struct neighbour *neigh, struct sock *sk, - struct sk_buff *skb) -{ - struct net_device *dev = neigh->dev; - int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3; - unsigned char *data; - struct dn_long_packet *lp; - struct dn_skb_cb *cb = DN_SKB_CB(skb); - - - if (skb_headroom(skb) < headroom) { - struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); - if (skb2 == NULL) { - net_crit_ratelimited("dn_long_output: no memory\n"); - kfree_skb(skb); - return -ENOBUFS; - } - consume_skb(skb); - skb = skb2; - net_info_ratelimited("dn_long_output: Increasing headroom\n"); - } - - data = skb_push(skb, sizeof(struct dn_long_packet) + 3); - lp = (struct dn_long_packet *)(data+3); - - *((__le16 *)data) = cpu_to_le16(skb->len - 2); - *(data + 2) = 1 | DN_RT_F_PF; /* Padding */ - - lp->msgflg = DN_RT_PKT_LONG|(cb->rt_flags&(DN_RT_F_IE|DN_RT_F_RQR|DN_RT_F_RTS)); - lp->d_area = lp->d_subarea = 0; - dn_dn2eth(lp->d_id, cb->dst); - lp->s_area = lp->s_subarea = 0; - dn_dn2eth(lp->s_id, cb->src); - lp->nl2 = 0; - lp->visit_ct = cb->hops & 0x3f; - lp->s_class = 0; - lp->pt = 0; - - skb_reset_network_header(skb); - - return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, - &init_net, sk, skb, NULL, neigh->dev, - dn_neigh_output_packet); -} - -/* - * For talking to pointopoint and multidrop devices: DDCMP and X.25 - */ -static int dn_short_output(struct neighbour *neigh, struct sock *sk, - struct sk_buff *skb) -{ - struct net_device *dev = neigh->dev; - int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; - struct dn_short_packet *sp; - unsigned char *data; - struct dn_skb_cb *cb = DN_SKB_CB(skb); - - - if (skb_headroom(skb) < headroom) { - struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); - if (skb2 == NULL) { - net_crit_ratelimited("dn_short_output: no memory\n"); - kfree_skb(skb); - return -ENOBUFS; - } - consume_skb(skb); - skb = skb2; - net_info_ratelimited("dn_short_output: Increasing headroom\n"); 
- } - - data = skb_push(skb, sizeof(struct dn_short_packet) + 2); - *((__le16 *)data) = cpu_to_le16(skb->len - 2); - sp = (struct dn_short_packet *)(data+2); - - sp->msgflg = DN_RT_PKT_SHORT|(cb->rt_flags&(DN_RT_F_RQR|DN_RT_F_RTS)); - sp->dstnode = cb->dst; - sp->srcnode = cb->src; - sp->forward = cb->hops & 0x3f; - - skb_reset_network_header(skb); - - return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, - &init_net, sk, skb, NULL, neigh->dev, - dn_neigh_output_packet); -} - -/* - * For talking to DECnet phase III nodes - * Phase 3 output is the same as short output, execpt that - * it clears the area bits before transmission. - */ -static int dn_phase3_output(struct neighbour *neigh, struct sock *sk, - struct sk_buff *skb) -{ - struct net_device *dev = neigh->dev; - int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; - struct dn_short_packet *sp; - unsigned char *data; - struct dn_skb_cb *cb = DN_SKB_CB(skb); - - if (skb_headroom(skb) < headroom) { - struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom); - if (skb2 == NULL) { - net_crit_ratelimited("dn_phase3_output: no memory\n"); - kfree_skb(skb); - return -ENOBUFS; - } - consume_skb(skb); - skb = skb2; - net_info_ratelimited("dn_phase3_output: Increasing headroom\n"); - } - - data = skb_push(skb, sizeof(struct dn_short_packet) + 2); - *((__le16 *)data) = cpu_to_le16(skb->len - 2); - sp = (struct dn_short_packet *)(data + 2); - - sp->msgflg = DN_RT_PKT_SHORT|(cb->rt_flags&(DN_RT_F_RQR|DN_RT_F_RTS)); - sp->dstnode = cb->dst & cpu_to_le16(0x03ff); - sp->srcnode = cb->src & cpu_to_le16(0x03ff); - sp->forward = cb->hops & 0x3f; - - skb_reset_network_header(skb); - - return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, - &init_net, sk, skb, NULL, neigh->dev, - dn_neigh_output_packet); -} - -int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - struct dn_route *rt = (struct dn_route *) dst; - struct neighbour *neigh = rt->n; - struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n); - struct dn_dev *dn_db; - bool use_long; - - rcu_read_lock(); - dn_db = rcu_dereference(neigh->dev->dn_ptr); - if (dn_db == NULL) { - rcu_read_unlock(); - return -EINVAL; - } - use_long = dn_db->use_long; - rcu_read_unlock(); - - if (dn->flags & DN_NDFLAG_P3) - return dn_phase3_output(neigh, sk, skb); - if (use_long) - return dn_long_output(neigh, sk, skb); - else - return dn_short_output(neigh, sk, skb); -} - -/* - * Unfortunately, the neighbour code uses the device in its hash - * function, so we don't get any advantage from it. This function - * basically does a neigh_lookup(), but without comparing the device - * field. 
This is required for the On-Ethernet cache - */ - -/* - * Pointopoint link receives a hello message - */ -void dn_neigh_pointopoint_hello(struct sk_buff *skb) -{ - kfree_skb(skb); -} - -/* - * Ethernet router hello message received - */ -int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data; - - struct neighbour *neigh; - struct dn_neigh *dn; - struct dn_dev *dn_db; - __le16 src; - - src = dn_eth2dn(msg->id); - - neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1); - - dn = container_of(neigh, struct dn_neigh, n); - - if (neigh) { - write_lock(&neigh->lock); - - neigh->used = jiffies; - dn_db = rcu_dereference(neigh->dev->dn_ptr); - - if (!(neigh->nud_state & NUD_PERMANENT)) { - neigh->updated = jiffies; - - if (neigh->dev->type == ARPHRD_ETHER) - memcpy(neigh->ha, &eth_hdr(skb)->h_source, ETH_ALEN); - - dn->blksize = le16_to_cpu(msg->blksize); - dn->priority = msg->priority; - - dn->flags &= ~DN_NDFLAG_P3; - - switch (msg->iinfo & DN_RT_INFO_TYPE) { - case DN_RT_INFO_L1RT: - dn->flags &=~DN_NDFLAG_R2; - dn->flags |= DN_NDFLAG_R1; - break; - case DN_RT_INFO_L2RT: - dn->flags |= DN_NDFLAG_R2; - } - } - - /* Only use routers in our area */ - if ((le16_to_cpu(src)>>10) == (le16_to_cpu((decnet_address))>>10)) { - if (!dn_db->router) { - dn_db->router = neigh_clone(neigh); - } else { - if (msg->priority > container_of(dn_db->router, - struct dn_neigh, n)->priority) - neigh_release(xchg(&dn_db->router, neigh_clone(neigh))); - } - } - write_unlock(&neigh->lock); - neigh_release(neigh); - } - - kfree_skb(skb); - return 0; -} - -/* - * Endnode hello message received - */ -int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data; - struct neighbour *neigh; - struct dn_neigh *dn; - __le16 src; - - src = dn_eth2dn(msg->id); - - neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1); - - dn = container_of(neigh, struct dn_neigh, n); - - if (neigh) { - write_lock(&neigh->lock); - - neigh->used = jiffies; - - if (!(neigh->nud_state & NUD_PERMANENT)) { - neigh->updated = jiffies; - - if (neigh->dev->type == ARPHRD_ETHER) - memcpy(neigh->ha, &eth_hdr(skb)->h_source, ETH_ALEN); - dn->flags &= ~(DN_NDFLAG_R1 | DN_NDFLAG_R2); - dn->blksize = le16_to_cpu(msg->blksize); - dn->priority = 0; - } - - write_unlock(&neigh->lock); - neigh_release(neigh); - } - - kfree_skb(skb); - return 0; -} - -static char *dn_find_slot(char *base, int max, int priority) -{ - int i; - unsigned char *min = NULL; - - base += 6; /* skip first id */ - - for(i = 0; i < max; i++) { - if (!min || (*base < *min)) - min = base; - base += 7; /* find next priority */ - } - - if (!min) - return NULL; - - return (*min < priority) ? (min - 6) : NULL; -} - -struct elist_cb_state { - struct net_device *dev; - unsigned char *ptr; - unsigned char *rs; - int t, n; -}; - -static void neigh_elist_cb(struct neighbour *neigh, void *_info) -{ - struct elist_cb_state *s = _info; - struct dn_neigh *dn; - - if (neigh->dev != s->dev) - return; - - dn = container_of(neigh, struct dn_neigh, n); - if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2))) - return; - - if (s->t == s->n) - s->rs = dn_find_slot(s->ptr, s->n, dn->priority); - else - s->t++; - if (s->rs == NULL) - return; - - dn_dn2eth(s->rs, dn->addr); - s->rs += 6; - *(s->rs) = neigh->nud_state & NUD_CONNECTED ? 
0x80 : 0x0; - *(s->rs) |= dn->priority; - s->rs++; -} - -int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n) -{ - struct elist_cb_state state; - - state.dev = dev; - state.t = 0; - state.n = n; - state.ptr = ptr; - state.rs = ptr; - - neigh_for_each(&dn_neigh_table, neigh_elist_cb, &state); - - return state.t; -} - - -#ifdef CONFIG_PROC_FS - -static inline void dn_neigh_format_entry(struct seq_file *seq, - struct neighbour *n) -{ - struct dn_neigh *dn = container_of(n, struct dn_neigh, n); - char buf[DN_ASCBUF_LEN]; - - read_lock(&n->lock); - seq_printf(seq, "%-7s %s%s%s %02x %02d %07ld %-8s\n", - dn_addr2asc(le16_to_cpu(dn->addr), buf), - (dn->flags&DN_NDFLAG_R1) ? "1" : "-", - (dn->flags&DN_NDFLAG_R2) ? "2" : "-", - (dn->flags&DN_NDFLAG_P3) ? "3" : "-", - dn->n.nud_state, - refcount_read(&dn->n.refcnt), - dn->blksize, - (dn->n.dev) ? dn->n.dev->name : "?"); - read_unlock(&n->lock); -} - -static int dn_neigh_seq_show(struct seq_file *seq, void *v) -{ - if (v == SEQ_START_TOKEN) { - seq_puts(seq, "Addr Flags State Use Blksize Dev\n"); - } else { - dn_neigh_format_entry(seq, v); - } - - return 0; -} - -static void *dn_neigh_seq_start(struct seq_file *seq, loff_t *pos) -{ - return neigh_seq_start(seq, pos, &dn_neigh_table, - NEIGH_SEQ_NEIGH_ONLY); -} - -static const struct seq_operations dn_neigh_seq_ops = { - .start = dn_neigh_seq_start, - .next = neigh_seq_next, - .stop = neigh_seq_stop, - .show = dn_neigh_seq_show, -}; -#endif - -void __init dn_neigh_init(void) -{ - neigh_table_init(NEIGH_DN_TABLE, &dn_neigh_table); - proc_create_net("decnet_neigh", 0444, init_net.proc_net, - &dn_neigh_seq_ops, sizeof(struct neigh_seq_state)); -} - -void __exit dn_neigh_cleanup(void) -{ - remove_proc_entry("decnet_neigh", init_net.proc_net); - neigh_table_clear(NEIGH_DN_TABLE, &dn_neigh_table); -} diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c deleted file mode 100644 index c59be5b04479..000000000000 --- a/net/decnet/dn_nsp_in.c +++ /dev/null @@ -1,907 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * DECnet Network Services Protocol (Input) - * - * Author: Eduardo Marcelo Serrat <emserrat@geocities.com> - * - * Changes: - * - * Steve Whitehouse: Split into dn_nsp_in.c and dn_nsp_out.c from - * original dn_nsp.c. - * Steve Whitehouse: Updated to work with my new routing architecture. - * Steve Whitehouse: Add changes from Eduardo Serrat's patches. - * Steve Whitehouse: Put all ack handling code in a common routine. - * Steve Whitehouse: Put other common bits into dn_nsp_rx() - * Steve Whitehouse: More checks on skb->len to catch bogus packets - * Fixed various race conditions and possible nasties. - * Steve Whitehouse: Now handles returned conninit frames. - * David S. Miller: New socket locking - * Steve Whitehouse: Fixed lockup when socket filtering was enabled. - * Paul Koning: Fix to push CC sockets into RUN when acks are - * received. - * Steve Whitehouse: - * Patrick Caulfield: Checking conninits for correctness & sending of error - * responses. - * Steve Whitehouse: Added backlog congestion level return codes. - * Patrick Caulfield: - * Steve Whitehouse: Added flow control support (outbound) - * Steve Whitehouse: Prepare for nonlinear skbs - */ - -/****************************************************************************** - (c) 1995-1998 E.M. 
Serrat emserrat@geocities.com - -*******************************************************************************/ - -#include <linux/errno.h> -#include <linux/filter.h> -#include <linux/types.h> -#include <linux/socket.h> -#include <linux/in.h> -#include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/string.h> -#include <linux/sockios.h> -#include <linux/net.h> -#include <linux/netdevice.h> -#include <linux/inet.h> -#include <linux/route.h> -#include <linux/slab.h> -#include <net/sock.h> -#include <net/tcp_states.h> -#include <linux/fcntl.h> -#include <linux/mm.h> -#include <linux/termios.h> -#include <linux/interrupt.h> -#include <linux/proc_fs.h> -#include <linux/stat.h> -#include <linux/init.h> -#include <linux/poll.h> -#include <linux/netfilter_decnet.h> -#include <net/neighbour.h> -#include <net/dst.h> -#include <net/dn.h> -#include <net/dn_nsp.h> -#include <net/dn_dev.h> -#include <net/dn_route.h> - -extern int decnet_log_martians; - -static void dn_log_martian(struct sk_buff *skb, const char *msg) -{ - if (decnet_log_martians) { - char *devname = skb->dev ? skb->dev->name : "???"; - struct dn_skb_cb *cb = DN_SKB_CB(skb); - net_info_ratelimited("DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n", - msg, devname, - le16_to_cpu(cb->src), - le16_to_cpu(cb->dst), - le16_to_cpu(cb->src_port), - le16_to_cpu(cb->dst_port)); - } -} - -/* - * For this function we've flipped the cross-subchannel bit - * if the message is an otherdata or linkservice message. Thus - * we can use it to work out what to update. - */ -static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack) -{ - struct dn_scp *scp = DN_SK(sk); - unsigned short type = ((ack >> 12) & 0x0003); - int wakeup = 0; - - switch (type) { - case 0: /* ACK - Data */ - if (dn_after(ack, scp->ackrcv_dat)) { - scp->ackrcv_dat = ack & 0x0fff; - wakeup |= dn_nsp_check_xmit_queue(sk, skb, - &scp->data_xmit_queue, - ack); - } - break; - case 1: /* NAK - Data */ - break; - case 2: /* ACK - OtherData */ - if (dn_after(ack, scp->ackrcv_oth)) { - scp->ackrcv_oth = ack & 0x0fff; - wakeup |= dn_nsp_check_xmit_queue(sk, skb, - &scp->other_xmit_queue, - ack); - } - break; - case 3: /* NAK - OtherData */ - break; - } - - if (wakeup && !sock_flag(sk, SOCK_DEAD)) - sk->sk_state_change(sk); -} - -/* - * This function is a universal ack processor. - */ -static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth) -{ - __le16 *ptr = (__le16 *)skb->data; - int len = 0; - unsigned short ack; - - if (skb->len < 2) - return len; - - if ((ack = le16_to_cpu(*ptr)) & 0x8000) { - skb_pull(skb, 2); - ptr++; - len += 2; - if ((ack & 0x4000) == 0) { - if (oth) - ack ^= 0x2000; - dn_ack(sk, skb, ack); - } - } - - if (skb->len < 2) - return len; - - if ((ack = le16_to_cpu(*ptr)) & 0x8000) { - skb_pull(skb, 2); - len += 2; - if ((ack & 0x4000) == 0) { - if (oth) - ack ^= 0x2000; - dn_ack(sk, skb, ack); - } - } - - return len; -} - - -/** - * dn_check_idf - Check an image data field format is correct. 
- * @pptr: Pointer to pointer to image data - * @len: Pointer to length of image data - * @max: The maximum allowed length of the data in the image data field - * @follow_on: Check that this many bytes exist beyond the end of the image data - * - * Returns: 0 if ok, -1 on error - */ -static inline int dn_check_idf(unsigned char **pptr, int *len, unsigned char max, unsigned char follow_on) -{ - unsigned char *ptr = *pptr; - unsigned char flen = *ptr++; - - (*len)--; - if (flen > max) - return -1; - if ((flen + follow_on) > *len) - return -1; - - *len -= flen; - *pptr = ptr + flen; - return 0; -} - -/* - * Table of reason codes to pass back to node which sent us a badly - * formed message, plus text messages for the log. A zero entry in - * the reason field means "don't reply" otherwise a disc init is sent with - * the specified reason code. - */ -static struct { - unsigned short reason; - const char *text; -} ci_err_table[] = { - { 0, "CI: Truncated message" }, - { NSP_REASON_ID, "CI: Destination username error" }, - { NSP_REASON_ID, "CI: Destination username type" }, - { NSP_REASON_US, "CI: Source username error" }, - { 0, "CI: Truncated at menuver" }, - { 0, "CI: Truncated before access or user data" }, - { NSP_REASON_IO, "CI: Access data format error" }, - { NSP_REASON_IO, "CI: User data format error" } -}; - -/* - * This function uses a slightly different lookup method - * to find its sockets, since it searches on object name/number - * rather than port numbers. Various tests are done to ensure that - * the incoming data is in the correct format before it is queued to - * a socket. - */ -static struct sock *dn_find_listener(struct sk_buff *skb, unsigned short *reason) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - struct nsp_conn_init_msg *msg = (struct nsp_conn_init_msg *)skb->data; - struct sockaddr_dn dstaddr; - struct sockaddr_dn srcaddr; - unsigned char type = 0; - int dstlen; - int srclen; - unsigned char *ptr; - int len; - int err = 0; - unsigned char menuver; - - memset(&dstaddr, 0, sizeof(struct sockaddr_dn)); - memset(&srcaddr, 0, sizeof(struct sockaddr_dn)); - - /* - * 1. Decode & remove message header - */ - cb->src_port = msg->srcaddr; - cb->dst_port = msg->dstaddr; - cb->services = msg->services; - cb->info = msg->info; - cb->segsize = le16_to_cpu(msg->segsize); - - if (!pskb_may_pull(skb, sizeof(*msg))) - goto err_out; - - skb_pull(skb, sizeof(*msg)); - - len = skb->len; - ptr = skb->data; - - /* - * 2. Check destination end username format - */ - dstlen = dn_username2sockaddr(ptr, len, &dstaddr, &type); - err++; - if (dstlen < 0) - goto err_out; - - err++; - if (type > 1) - goto err_out; - - len -= dstlen; - ptr += dstlen; - - /* - * 3. Check source end username format - */ - srclen = dn_username2sockaddr(ptr, len, &srcaddr, &type); - err++; - if (srclen < 0) - goto err_out; - - len -= srclen; - ptr += srclen; - err++; - if (len < 1) - goto err_out; - - menuver = *ptr; - ptr++; - len--; - - /* - * 4. Check that optional data actually exists if menuver says it does - */ - err++; - if ((menuver & (DN_MENUVER_ACC | DN_MENUVER_USR)) && (len < 1)) - goto err_out; - - /* - * 5. Check optional access data format - */ - err++; - if (menuver & DN_MENUVER_ACC) { - if (dn_check_idf(&ptr, &len, 39, 1)) - goto err_out; - if (dn_check_idf(&ptr, &len, 39, 1)) - goto err_out; - if (dn_check_idf(&ptr, &len, 39, (menuver & DN_MENUVER_USR) ? 1 : 0)) - goto err_out; - } - - /* - * 6. 
Check optional user data format - */ - err++; - if (menuver & DN_MENUVER_USR) { - if (dn_check_idf(&ptr, &len, 16, 0)) - goto err_out; - } - - /* - * 7. Look up socket based on destination end username - */ - return dn_sklist_find_listener(&dstaddr); -err_out: - dn_log_martian(skb, ci_err_table[err].text); - *reason = ci_err_table[err].reason; - return NULL; -} - - -static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb) -{ - if (sk_acceptq_is_full(sk)) { - kfree_skb(skb); - return; - } - - sk_acceptq_added(sk); - skb_queue_tail(&sk->sk_receive_queue, skb); - sk->sk_state_change(sk); -} - -static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - struct dn_scp *scp = DN_SK(sk); - unsigned char *ptr; - - if (skb->len < 4) - goto out; - - ptr = skb->data; - cb->services = *ptr++; - cb->info = *ptr++; - cb->segsize = le16_to_cpu(*(__le16 *)ptr); - - if ((scp->state == DN_CI) || (scp->state == DN_CD)) { - scp->persist = 0; - scp->addrrem = cb->src_port; - sk->sk_state = TCP_ESTABLISHED; - scp->state = DN_RUN; - scp->services_rem = cb->services; - scp->info_rem = cb->info; - scp->segsize_rem = cb->segsize; - - if ((scp->services_rem & NSP_FC_MASK) == NSP_FC_NONE) - scp->max_window = decnet_no_fc_max_cwnd; - - if (skb->len > 0) { - u16 dlen = *skb->data; - if ((dlen <= 16) && (dlen <= skb->len)) { - scp->conndata_in.opt_optl = cpu_to_le16(dlen); - skb_copy_from_linear_data_offset(skb, 1, - scp->conndata_in.opt_data, dlen); - } - } - dn_nsp_send_link(sk, DN_NOCHANGE, 0); - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_state_change(sk); - } - -out: - kfree_skb(skb); -} - -static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb) -{ - struct dn_scp *scp = DN_SK(sk); - - if (scp->state == DN_CI) { - scp->state = DN_CD; - scp->persist = 0; - } - - kfree_skb(skb); -} - -static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb) -{ - struct dn_scp *scp = DN_SK(sk); - struct dn_skb_cb *cb = DN_SKB_CB(skb); - unsigned short reason; - - if (skb->len < 2) - goto out; - - reason = le16_to_cpu(*(__le16 *)skb->data); - skb_pull(skb, 2); - - scp->discdata_in.opt_status = cpu_to_le16(reason); - scp->discdata_in.opt_optl = 0; - memset(scp->discdata_in.opt_data, 0, 16); - - if (skb->len > 0) { - u16 dlen = *skb->data; - if ((dlen <= 16) && (dlen <= skb->len)) { - scp->discdata_in.opt_optl = cpu_to_le16(dlen); - skb_copy_from_linear_data_offset(skb, 1, scp->discdata_in.opt_data, dlen); - } - } - - scp->addrrem = cb->src_port; - sk->sk_state = TCP_CLOSE; - - switch (scp->state) { - case DN_CI: - case DN_CD: - scp->state = DN_RJ; - sk->sk_err = ECONNREFUSED; - break; - case DN_RUN: - sk->sk_shutdown |= SHUTDOWN_MASK; - scp->state = DN_DN; - break; - case DN_DI: - scp->state = DN_DIC; - break; - } - - if (!sock_flag(sk, SOCK_DEAD)) { - if (sk->sk_socket->state != SS_UNCONNECTED) - sk->sk_socket->state = SS_DISCONNECTING; - sk->sk_state_change(sk); - } - - /* - * It appears that its possible for remote machines to send disc - * init messages with no port identifier if we are in the CI and - * possibly also the CD state. Obviously we shouldn't reply with - * a message if we don't know what the end point is. - */ - if (scp->addrrem) { - dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC); - } - scp->persist_fxn = dn_destroy_timer; - scp->persist = dn_nsp_persist(sk); - -out: - kfree_skb(skb); -} - -/* - * disc_conf messages are also called no_resources or no_link - * messages depending upon the "reason" field. 
- */ -static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb) -{ - struct dn_scp *scp = DN_SK(sk); - unsigned short reason; - - if (skb->len != 2) - goto out; - - reason = le16_to_cpu(*(__le16 *)skb->data); - - sk->sk_state = TCP_CLOSE; - - switch (scp->state) { - case DN_CI: - scp->state = DN_NR; - break; - case DN_DR: - if (reason == NSP_REASON_DC) - scp->state = DN_DRC; - if (reason == NSP_REASON_NL) - scp->state = DN_CN; - break; - case DN_DI: - scp->state = DN_DIC; - break; - case DN_RUN: - sk->sk_shutdown |= SHUTDOWN_MASK; - fallthrough; - case DN_CC: - scp->state = DN_CN; - } - - if (!sock_flag(sk, SOCK_DEAD)) { - if (sk->sk_socket->state != SS_UNCONNECTED) - sk->sk_socket->state = SS_DISCONNECTING; - sk->sk_state_change(sk); - } - - scp->persist_fxn = dn_destroy_timer; - scp->persist = dn_nsp_persist(sk); - -out: - kfree_skb(skb); -} - -static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb) -{ - struct dn_scp *scp = DN_SK(sk); - unsigned short segnum; - unsigned char lsflags; - signed char fcval; - int wake_up = 0; - char *ptr = skb->data; - unsigned char fctype = scp->services_rem & NSP_FC_MASK; - - if (skb->len != 4) - goto out; - - segnum = le16_to_cpu(*(__le16 *)ptr); - ptr += 2; - lsflags = *(unsigned char *)ptr++; - fcval = *ptr; - - /* - * Here we ignore erroneous packets which should really - * should cause a connection abort. It is not critical - * for now though. - */ - if (lsflags & 0xf8) - goto out; - - if (seq_next(scp->numoth_rcv, segnum)) { - seq_add(&scp->numoth_rcv, 1); - switch(lsflags & 0x04) { /* FCVAL INT */ - case 0x00: /* Normal Request */ - switch(lsflags & 0x03) { /* FCVAL MOD */ - case 0x00: /* Request count */ - if (fcval < 0) { - unsigned char p_fcval = -fcval; - if ((scp->flowrem_dat > p_fcval) && - (fctype == NSP_FC_SCMC)) { - scp->flowrem_dat -= p_fcval; - } - } else if (fcval > 0) { - scp->flowrem_dat += fcval; - wake_up = 1; - } - break; - case 0x01: /* Stop outgoing data */ - scp->flowrem_sw = DN_DONTSEND; - break; - case 0x02: /* Ok to start again */ - scp->flowrem_sw = DN_SEND; - dn_nsp_output(sk); - wake_up = 1; - } - break; - case 0x04: /* Interrupt Request */ - if (fcval > 0) { - scp->flowrem_oth += fcval; - wake_up = 1; - } - break; - } - if (wake_up && !sock_flag(sk, SOCK_DEAD)) - sk->sk_state_change(sk); - } - - dn_nsp_send_oth_ack(sk); - -out: - kfree_skb(skb); -} - -/* - * Copy of sock_queue_rcv_skb (from sock.h) without - * bh_lock_sock() (its already held when this is called) which - * also allows data and other data to be queued to a socket. - */ -static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) -{ - int err; - - /* Cast skb->rcvbuf to unsigned... 
It's pointless, but reduces - number of warnings when compiling with -W --ANK - */ - if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned int)sk->sk_rcvbuf) { - err = -ENOMEM; - goto out; - } - - err = sk_filter(sk, skb); - if (err) - goto out; - - skb_set_owner_r(skb, sk); - skb_queue_tail(queue, skb); - - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_data_ready(sk); -out: - return err; -} - -static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb) -{ - struct dn_scp *scp = DN_SK(sk); - unsigned short segnum; - struct dn_skb_cb *cb = DN_SKB_CB(skb); - int queued = 0; - - if (skb->len < 2) - goto out; - - cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data); - skb_pull(skb, 2); - - if (seq_next(scp->numoth_rcv, segnum)) { - - if (dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) { - seq_add(&scp->numoth_rcv, 1); - scp->other_report = 0; - queued = 1; - } - } - - dn_nsp_send_oth_ack(sk); -out: - if (!queued) - kfree_skb(skb); -} - -static void dn_nsp_data(struct sock *sk, struct sk_buff *skb) -{ - int queued = 0; - unsigned short segnum; - struct dn_skb_cb *cb = DN_SKB_CB(skb); - struct dn_scp *scp = DN_SK(sk); - - if (skb->len < 2) - goto out; - - cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data); - skb_pull(skb, 2); - - if (seq_next(scp->numdat_rcv, segnum)) { - if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) { - seq_add(&scp->numdat_rcv, 1); - queued = 1; - } - - if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) { - scp->flowloc_sw = DN_DONTSEND; - dn_nsp_send_link(sk, DN_DONTSEND, 0); - } - } - - dn_nsp_send_data_ack(sk); -out: - if (!queued) - kfree_skb(skb); -} - -/* - * If one of our conninit messages is returned, this function - * deals with it. It puts the socket into the NO_COMMUNICATION - * state. 
- */ -static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb) -{ - struct dn_scp *scp = DN_SK(sk); - - if (scp->state == DN_CI) { - scp->state = DN_NC; - sk->sk_state = TCP_CLOSE; - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_state_change(sk); - } - - kfree_skb(skb); -} - -static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - int ret = NET_RX_DROP; - - /* Must not reply to returned packets */ - if (cb->rt_flags & DN_RT_F_RTS) - goto out; - - if ((reason != NSP_REASON_OK) && ((cb->nsp_flags & 0x0c) == 0x08)) { - switch (cb->nsp_flags & 0x70) { - case 0x10: - case 0x60: /* (Retransmitted) Connect Init */ - dn_nsp_return_disc(skb, NSP_DISCINIT, reason); - ret = NET_RX_SUCCESS; - break; - case 0x20: /* Connect Confirm */ - dn_nsp_return_disc(skb, NSP_DISCCONF, reason); - ret = NET_RX_SUCCESS; - break; - } - } - -out: - kfree_skb(skb); - return ret; -} - -static int dn_nsp_rx_packet(struct net *net, struct sock *sk2, - struct sk_buff *skb) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - struct sock *sk = NULL; - unsigned char *ptr = (unsigned char *)skb->data; - unsigned short reason = NSP_REASON_NL; - - if (!pskb_may_pull(skb, 2)) - goto free_out; - - skb_reset_transport_header(skb); - cb->nsp_flags = *ptr++; - - if (decnet_debug_level & 2) - printk(KERN_DEBUG "dn_nsp_rx: Message type 0x%02x\n", (int)cb->nsp_flags); - - if (cb->nsp_flags & 0x83) - goto free_out; - - /* - * Filter out conninits and useless packet types - */ - if ((cb->nsp_flags & 0x0c) == 0x08) { - switch (cb->nsp_flags & 0x70) { - case 0x00: /* NOP */ - case 0x70: /* Reserved */ - case 0x50: /* Reserved, Phase II node init */ - goto free_out; - case 0x10: - case 0x60: - if (unlikely(cb->rt_flags & DN_RT_F_RTS)) - goto free_out; - sk = dn_find_listener(skb, &reason); - goto got_it; - } - } - - if (!pskb_may_pull(skb, 3)) - goto free_out; - - /* - * Grab the destination address. - */ - cb->dst_port = *(__le16 *)ptr; - cb->src_port = 0; - ptr += 2; - - /* - * If not a connack, grab the source address too. - */ - if (pskb_may_pull(skb, 5)) { - cb->src_port = *(__le16 *)ptr; - ptr += 2; - skb_pull(skb, 5); - } - - /* - * Returned packets... - * Swap src & dst and look up in the normal way. - */ - if (unlikely(cb->rt_flags & DN_RT_F_RTS)) { - swap(cb->dst_port, cb->src_port); - swap(cb->dst, cb->src); - } - - /* - * Find the socket to which this skb is destined. - */ - sk = dn_find_by_skb(skb); -got_it: - if (sk != NULL) { - struct dn_scp *scp = DN_SK(sk); - - /* Reset backoff */ - scp->nsp_rxtshift = 0; - - /* - * We linearize everything except data segments here. - */ - if (cb->nsp_flags & ~0x60) { - if (unlikely(skb_linearize(skb))) - goto free_out; - } - - return sk_receive_skb(sk, skb, 0); - } - - return dn_nsp_no_socket(skb, reason); - -free_out: - kfree_skb(skb); - return NET_RX_DROP; -} - -int dn_nsp_rx(struct sk_buff *skb) -{ - return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, - &init_net, NULL, skb, skb->dev, NULL, - dn_nsp_rx_packet); -} - -/* - * This is the main receive routine for sockets. It is called - * from the above when the socket is not busy, and also from - * sock_release() when there is a backlog queued up. 
- */ -int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb) -{ - struct dn_scp *scp = DN_SK(sk); - struct dn_skb_cb *cb = DN_SKB_CB(skb); - - if (cb->rt_flags & DN_RT_F_RTS) { - if (cb->nsp_flags == 0x18 || cb->nsp_flags == 0x68) - dn_returned_conn_init(sk, skb); - else - kfree_skb(skb); - return NET_RX_SUCCESS; - } - - /* - * Control packet. - */ - if ((cb->nsp_flags & 0x0c) == 0x08) { - switch (cb->nsp_flags & 0x70) { - case 0x10: - case 0x60: - dn_nsp_conn_init(sk, skb); - break; - case 0x20: - dn_nsp_conn_conf(sk, skb); - break; - case 0x30: - dn_nsp_disc_init(sk, skb); - break; - case 0x40: - dn_nsp_disc_conf(sk, skb); - break; - } - - } else if (cb->nsp_flags == 0x24) { - /* - * Special for connacks, 'cos they don't have - * ack data or ack otherdata info. - */ - dn_nsp_conn_ack(sk, skb); - } else { - int other = 1; - - /* both data and ack frames can kick a CC socket into RUN */ - if ((scp->state == DN_CC) && !sock_flag(sk, SOCK_DEAD)) { - scp->state = DN_RUN; - sk->sk_state = TCP_ESTABLISHED; - sk->sk_state_change(sk); - } - - if ((cb->nsp_flags & 0x1c) == 0) - other = 0; - if (cb->nsp_flags == 0x04) - other = 0; - - /* - * Read out ack data here, this applies equally - * to data, other data, link service and both - * ack data and ack otherdata. - */ - dn_process_ack(sk, skb, other); - - /* - * If we've some sort of data here then call a - * suitable routine for dealing with it, otherwise - * the packet is an ack and can be discarded. - */ - if ((cb->nsp_flags & 0x0c) == 0) { - - if (scp->state != DN_RUN) - goto free_out; - - switch (cb->nsp_flags) { - case 0x10: /* LS */ - dn_nsp_linkservice(sk, skb); - break; - case 0x30: /* OD */ - dn_nsp_otherdata(sk, skb); - break; - default: - dn_nsp_data(sk, skb); - } - - } else { /* Ack, chuck it out here */ -free_out: - kfree_skb(skb); - } - } - - return NET_RX_SUCCESS; -} diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c deleted file mode 100644 index b05639bdfc8f..000000000000 --- a/net/decnet/dn_nsp_out.c +++ /dev/null @@ -1,696 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * DECnet Network Services Protocol (Output) - * - * Author: Eduardo Marcelo Serrat <emserrat@geocities.com> - * - * Changes: - * - * Steve Whitehouse: Split into dn_nsp_in.c and dn_nsp_out.c from - * original dn_nsp.c. - * Steve Whitehouse: Updated to work with my new routing architecture. - * Steve Whitehouse: Added changes from Eduardo Serrat's patches. - * Steve Whitehouse: Now conninits have the "return" bit set. - * Steve Whitehouse: Fixes to check alloc'd skbs are non NULL! - * Moved output state machine into one function - * Steve Whitehouse: New output state machine - * Paul Koning: Connect Confirm message fix. - * Eduardo Serrat: Fix to stop dn_nsp_do_disc() sending malformed packets. - * Steve Whitehouse: dn_nsp_output() and friends needed a spring clean - * Steve Whitehouse: Moved dn_nsp_send() in here from route.h - */ - -/****************************************************************************** - (c) 1995-1998 E.M. 
Serrat emserrat@geocities.com - -*******************************************************************************/ - -#include <linux/errno.h> -#include <linux/types.h> -#include <linux/socket.h> -#include <linux/in.h> -#include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/string.h> -#include <linux/sockios.h> -#include <linux/net.h> -#include <linux/netdevice.h> -#include <linux/inet.h> -#include <linux/route.h> -#include <linux/slab.h> -#include <net/sock.h> -#include <linux/fcntl.h> -#include <linux/mm.h> -#include <linux/termios.h> -#include <linux/interrupt.h> -#include <linux/proc_fs.h> -#include <linux/stat.h> -#include <linux/init.h> -#include <linux/poll.h> -#include <linux/if_packet.h> -#include <linux/jiffies.h> -#include <net/neighbour.h> -#include <net/dst.h> -#include <net/flow.h> -#include <net/dn.h> -#include <net/dn_nsp.h> -#include <net/dn_dev.h> -#include <net/dn_route.h> - - -static int nsp_backoff[NSP_MAXRXTSHIFT + 1] = { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 }; - -static void dn_nsp_send(struct sk_buff *skb) -{ - struct sock *sk = skb->sk; - struct dn_scp *scp = DN_SK(sk); - struct dst_entry *dst; - struct flowidn fld; - - skb_reset_transport_header(skb); - scp->stamp = jiffies; - - dst = sk_dst_check(sk, 0); - if (dst) { -try_again: - skb_dst_set(skb, dst); - dst_output(&init_net, skb->sk, skb); - return; - } - - memset(&fld, 0, sizeof(fld)); - fld.flowidn_oif = sk->sk_bound_dev_if; - fld.saddr = dn_saddr2dn(&scp->addr); - fld.daddr = dn_saddr2dn(&scp->peer); - dn_sk_ports_copy(&fld, scp); - fld.flowidn_proto = DNPROTO_NSP; - if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, 0) == 0) { - dst = sk_dst_get(sk); - sk->sk_route_caps = dst->dev->features; - goto try_again; - } - - sk->sk_err = EHOSTUNREACH; - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_state_change(sk); -} - - -/* - * If sk == NULL, then we assume that we are supposed to be making - * a routing layer skb. If sk != NULL, then we are supposed to be - * creating an skb for the NSP layer. - * - * The eventual aim is for each socket to have a cached header size - * for its outgoing packets, and to set hdr from this when sk != NULL. - */ -struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri) -{ - struct sk_buff *skb; - int hdr = 64; - - if ((skb = alloc_skb(size + hdr, pri)) == NULL) - return NULL; - - skb->protocol = htons(ETH_P_DNA_RT); - skb->pkt_type = PACKET_OUTGOING; - - if (sk) - skb_set_owner_w(skb, sk); - - skb_reserve(skb, hdr); - - return skb; -} - -/* - * Calculate persist timer based upon the smoothed round - * trip time and the variance. Backoff according to the - * nsp_backoff[] array. - */ -unsigned long dn_nsp_persist(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - - unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1; - - t *= nsp_backoff[scp->nsp_rxtshift]; - - if (t < HZ) t = HZ; - if (t > (600*HZ)) t = (600*HZ); - - if (scp->nsp_rxtshift < NSP_MAXRXTSHIFT) - scp->nsp_rxtshift++; - - /* printk(KERN_DEBUG "rxtshift %lu, t=%lu\n", scp->nsp_rxtshift, t); */ - - return t; -} - -/* - * This is called each time we get an estimate for the rtt - * on the link. - */ -static void dn_nsp_rtt(struct sock *sk, long rtt) -{ - struct dn_scp *scp = DN_SK(sk); - long srtt = (long)scp->nsp_srtt; - long rttvar = (long)scp->nsp_rttvar; - long delta; - - /* - * If the jiffies clock flips over in the middle of timestamp - * gathering this value might turn out negative, so we make sure - * that is it always positive here. 
- */ - if (rtt < 0) - rtt = -rtt; - /* - * Add new rtt to smoothed average - */ - delta = ((rtt << 3) - srtt); - srtt += (delta >> 3); - if (srtt >= 1) - scp->nsp_srtt = (unsigned long)srtt; - else - scp->nsp_srtt = 1; - - /* - * Add new rtt variance to smoothed varience - */ - delta >>= 1; - rttvar += ((((delta>0)?(delta):(-delta)) - rttvar) >> 2); - if (rttvar >= 1) - scp->nsp_rttvar = (unsigned long)rttvar; - else - scp->nsp_rttvar = 1; - - /* printk(KERN_DEBUG "srtt=%lu rttvar=%lu\n", scp->nsp_srtt, scp->nsp_rttvar); */ -} - -/** - * dn_nsp_clone_and_send - Send a data packet by cloning it - * @skb: The packet to clone and transmit - * @gfp: memory allocation flag - * - * Clone a queued data or other data packet and transmit it. - * - * Returns: The number of times the packet has been sent previously - */ -static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb, - gfp_t gfp) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - struct sk_buff *skb2; - int ret = 0; - - if ((skb2 = skb_clone(skb, gfp)) != NULL) { - ret = cb->xmit_count; - cb->xmit_count++; - cb->stamp = jiffies; - skb2->sk = skb->sk; - dn_nsp_send(skb2); - } - - return ret; -} - -/** - * dn_nsp_output - Try and send something from socket queues - * @sk: The socket whose queues are to be investigated - * - * Try and send the packet on the end of the data and other data queues. - * Other data gets priority over data, and if we retransmit a packet we - * reduce the window by dividing it in two. - * - */ -void dn_nsp_output(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - struct sk_buff *skb; - unsigned int reduce_win = 0; - - /* - * First we check for otherdata/linkservice messages - */ - if ((skb = skb_peek(&scp->other_xmit_queue)) != NULL) - reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC); - - /* - * If we may not send any data, we don't. - * If we are still trying to get some other data down the - * channel, we don't try and send any data. - */ - if (reduce_win || (scp->flowrem_sw != DN_SEND)) - goto recalc_window; - - if ((skb = skb_peek(&scp->data_xmit_queue)) != NULL) - reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC); - - /* - * If we've sent any frame more than once, we cut the - * send window size in half. There is always a minimum - * window size of one available. 
- */ -recalc_window: - if (reduce_win) { - scp->snd_window >>= 1; - if (scp->snd_window < NSP_MIN_WINDOW) - scp->snd_window = NSP_MIN_WINDOW; - } -} - -int dn_nsp_xmit_timeout(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - - dn_nsp_output(sk); - - if (!skb_queue_empty(&scp->data_xmit_queue) || - !skb_queue_empty(&scp->other_xmit_queue)) - scp->persist = dn_nsp_persist(sk); - - return 0; -} - -static inline __le16 *dn_mk_common_header(struct dn_scp *scp, struct sk_buff *skb, unsigned char msgflag, int len) -{ - unsigned char *ptr = skb_push(skb, len); - - BUG_ON(len < 5); - - *ptr++ = msgflag; - *((__le16 *)ptr) = scp->addrrem; - ptr += 2; - *((__le16 *)ptr) = scp->addrloc; - ptr += 2; - return (__le16 __force *)ptr; -} - -static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other) -{ - struct dn_scp *scp = DN_SK(sk); - unsigned short acknum = scp->numdat_rcv & 0x0FFF; - unsigned short ackcrs = scp->numoth_rcv & 0x0FFF; - __le16 *ptr; - - BUG_ON(hlen < 9); - - scp->ackxmt_dat = acknum; - scp->ackxmt_oth = ackcrs; - acknum |= 0x8000; - ackcrs |= 0x8000; - - /* If this is an "other data/ack" message, swap acknum and ackcrs */ - if (other) - swap(acknum, ackcrs); - - /* Set "cross subchannel" bit in ackcrs */ - ackcrs |= 0x2000; - - ptr = dn_mk_common_header(scp, skb, msgflag, hlen); - - *ptr++ = cpu_to_le16(acknum); - *ptr++ = cpu_to_le16(ackcrs); - - return ptr; -} - -static __le16 *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth) -{ - struct dn_scp *scp = DN_SK(sk); - struct dn_skb_cb *cb = DN_SKB_CB(skb); - __le16 *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth); - - if (unlikely(oth)) { - cb->segnum = scp->numoth; - seq_add(&scp->numoth, 1); - } else { - cb->segnum = scp->numdat; - seq_add(&scp->numdat, 1); - } - *(ptr++) = cpu_to_le16(cb->segnum); - - return ptr; -} - -void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, - gfp_t gfp, int oth) -{ - struct dn_scp *scp = DN_SK(sk); - struct dn_skb_cb *cb = DN_SKB_CB(skb); - unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1; - - cb->xmit_count = 0; - dn_nsp_mk_data_header(sk, skb, oth); - - /* - * Slow start: If we have been idle for more than - * one RTT, then reset window to min size. - */ - if (time_is_before_jiffies(scp->stamp + t)) - scp->snd_window = NSP_MIN_WINDOW; - - if (oth) - skb_queue_tail(&scp->other_xmit_queue, skb); - else - skb_queue_tail(&scp->data_xmit_queue, skb); - - if (scp->flowrem_sw != DN_SEND) - return; - - dn_nsp_clone_and_send(skb, gfp); -} - - -int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - struct dn_scp *scp = DN_SK(sk); - struct sk_buff *skb2, *n, *ack = NULL; - int wakeup = 0; - int try_retrans = 0; - unsigned long reftime = cb->stamp; - unsigned long pkttime; - unsigned short xmit_count; - unsigned short segnum; - - skb_queue_walk_safe(q, skb2, n) { - struct dn_skb_cb *cb2 = DN_SKB_CB(skb2); - - if (dn_before_or_equal(cb2->segnum, acknum)) - ack = skb2; - - /* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? 
"ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */ - - if (ack == NULL) - continue; - - /* printk(KERN_DEBUG "check_xmit_queue: %04x, %d\n", acknum, cb2->xmit_count); */ - - /* Does _last_ packet acked have xmit_count > 1 */ - try_retrans = 0; - /* Remember to wake up the sending process */ - wakeup = 1; - /* Keep various statistics */ - pkttime = cb2->stamp; - xmit_count = cb2->xmit_count; - segnum = cb2->segnum; - /* Remove and drop ack'ed packet */ - skb_unlink(ack, q); - kfree_skb(ack); - ack = NULL; - - /* - * We don't expect to see acknowledgements for packets we - * haven't sent yet. - */ - WARN_ON(xmit_count == 0); - - /* - * If the packet has only been sent once, we can use it - * to calculate the RTT and also open the window a little - * further. - */ - if (xmit_count == 1) { - if (dn_equal(segnum, acknum)) - dn_nsp_rtt(sk, (long)(pkttime - reftime)); - - if (scp->snd_window < scp->max_window) - scp->snd_window++; - } - - /* - * Packet has been sent more than once. If this is the last - * packet to be acknowledged then we want to send the next - * packet in the send queue again (assumes the remote host does - * go-back-N error control). - */ - if (xmit_count > 1) - try_retrans = 1; - } - - if (try_retrans) - dn_nsp_output(sk); - - return wakeup; -} - -void dn_nsp_send_data_ack(struct sock *sk) -{ - struct sk_buff *skb = NULL; - - if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL) - return; - - skb_reserve(skb, 9); - dn_mk_ack_header(sk, skb, 0x04, 9, 0); - dn_nsp_send(skb); -} - -void dn_nsp_send_oth_ack(struct sock *sk) -{ - struct sk_buff *skb = NULL; - - if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL) - return; - - skb_reserve(skb, 9); - dn_mk_ack_header(sk, skb, 0x14, 9, 1); - dn_nsp_send(skb); -} - - -void dn_send_conn_ack (struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - struct sk_buff *skb = NULL; - struct nsp_conn_ack_msg *msg; - - if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL) - return; - - msg = skb_put(skb, 3); - msg->msgflg = 0x24; - msg->dstaddr = scp->addrrem; - - dn_nsp_send(skb); -} - -static int dn_nsp_retrans_conn_conf(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - - if (scp->state == DN_CC) - dn_send_conn_conf(sk, GFP_ATOMIC); - - return 0; -} - -void dn_send_conn_conf(struct sock *sk, gfp_t gfp) -{ - struct dn_scp *scp = DN_SK(sk); - struct sk_buff *skb = NULL; - struct nsp_conn_init_msg *msg; - __u8 len = (__u8)le16_to_cpu(scp->conndata_out.opt_optl); - - if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL) - return; - - msg = skb_put(skb, sizeof(*msg)); - msg->msgflg = 0x28; - msg->dstaddr = scp->addrrem; - msg->srcaddr = scp->addrloc; - msg->services = scp->services_loc; - msg->info = scp->info_loc; - msg->segsize = cpu_to_le16(scp->segsize_loc); - - skb_put_u8(skb, len); - - if (len > 0) - skb_put_data(skb, scp->conndata_out.opt_data, len); - - - dn_nsp_send(skb); - - scp->persist = dn_nsp_persist(sk); - scp->persist_fxn = dn_nsp_retrans_conn_conf; -} - - -static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, - unsigned short reason, gfp_t gfp, - struct dst_entry *dst, - int ddl, unsigned char *dd, __le16 rem, __le16 loc) -{ - struct sk_buff *skb = NULL; - int size = 7 + ddl + ((msgflg == NSP_DISCINIT) ? 1 : 0); - unsigned char *msg; - - if ((dst == NULL) || (rem == 0)) { - net_dbg_ratelimited("DECnet: dn_nsp_do_disc: BUG! 
Please report this to SteveW@ACM.org rem=%u dst=%p\n", - le16_to_cpu(rem), dst); - return; - } - - if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL) - return; - - msg = skb_put(skb, size); - *msg++ = msgflg; - *(__le16 *)msg = rem; - msg += 2; - *(__le16 *)msg = loc; - msg += 2; - *(__le16 *)msg = cpu_to_le16(reason); - msg += 2; - if (msgflg == NSP_DISCINIT) - *msg++ = ddl; - - if (ddl) { - memcpy(msg, dd, ddl); - } - - /* - * This doesn't go via the dn_nsp_send() function since we need - * to be able to send disc packets out which have no socket - * associations. - */ - skb_dst_set(skb, dst_clone(dst)); - dst_output(&init_net, skb->sk, skb); -} - - -void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, - unsigned short reason, gfp_t gfp) -{ - struct dn_scp *scp = DN_SK(sk); - int ddl = 0; - - if (msgflg == NSP_DISCINIT) - ddl = le16_to_cpu(scp->discdata_out.opt_optl); - - if (reason == 0) - reason = le16_to_cpu(scp->discdata_out.opt_status); - - dn_nsp_do_disc(sk, msgflg, reason, gfp, __sk_dst_get(sk), ddl, - scp->discdata_out.opt_data, scp->addrrem, scp->addrloc); -} - - -void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg, - unsigned short reason) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - int ddl = 0; - gfp_t gfp = GFP_ATOMIC; - - dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb_dst(skb), ddl, - NULL, cb->src_port, cb->dst_port); -} - - -void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval) -{ - struct dn_scp *scp = DN_SK(sk); - struct sk_buff *skb; - unsigned char *ptr; - gfp_t gfp = GFP_ATOMIC; - - if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) - return; - - skb_reserve(skb, DN_MAX_NSP_DATA_HEADER); - ptr = skb_put(skb, 2); - DN_SKB_CB(skb)->nsp_flags = 0x10; - *ptr++ = lsflags; - *ptr = fcval; - - dn_nsp_queue_xmit(sk, skb, gfp, 1); - - scp->persist = dn_nsp_persist(sk); - scp->persist_fxn = dn_nsp_xmit_timeout; -} - -static int dn_nsp_retrans_conninit(struct sock *sk) -{ - struct dn_scp *scp = DN_SK(sk); - - if (scp->state == DN_CI) - dn_nsp_send_conninit(sk, NSP_RCI); - - return 0; -} - -void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg) -{ - struct dn_scp *scp = DN_SK(sk); - struct nsp_conn_init_msg *msg; - unsigned char aux; - unsigned char menuver; - struct dn_skb_cb *cb; - unsigned char type = 1; - gfp_t allocation = (msgflg == NSP_CI) ? 
sk->sk_allocation : GFP_ATOMIC; - struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); - - if (!skb) - return; - - cb = DN_SKB_CB(skb); - msg = skb_put(skb, sizeof(*msg)); - - msg->msgflg = msgflg; - msg->dstaddr = 0x0000; /* Remote Node will assign it*/ - - msg->srcaddr = scp->addrloc; - msg->services = scp->services_loc; /* Requested flow control */ - msg->info = scp->info_loc; /* Version Number */ - msg->segsize = cpu_to_le16(scp->segsize_loc); /* Max segment size */ - - if (scp->peer.sdn_objnum) - type = 0; - - skb_put(skb, dn_sockaddr2username(&scp->peer, - skb_tail_pointer(skb), type)); - skb_put(skb, dn_sockaddr2username(&scp->addr, - skb_tail_pointer(skb), 2)); - - menuver = DN_MENUVER_ACC | DN_MENUVER_USR; - if (scp->peer.sdn_flags & SDF_PROXY) - menuver |= DN_MENUVER_PRX; - if (scp->peer.sdn_flags & SDF_UICPROXY) - menuver |= DN_MENUVER_UIC; - - skb_put_u8(skb, menuver); /* Menu Version */ - - aux = scp->accessdata.acc_userl; - skb_put_u8(skb, aux); - if (aux > 0) - skb_put_data(skb, scp->accessdata.acc_user, aux); - - aux = scp->accessdata.acc_passl; - skb_put_u8(skb, aux); - if (aux > 0) - skb_put_data(skb, scp->accessdata.acc_pass, aux); - - aux = scp->accessdata.acc_accl; - skb_put_u8(skb, aux); - if (aux > 0) - skb_put_data(skb, scp->accessdata.acc_acc, aux); - - aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl); - skb_put_u8(skb, aux); - if (aux > 0) - skb_put_data(skb, scp->conndata_out.opt_data, aux); - - scp->persist = dn_nsp_persist(sk); - scp->persist_fxn = dn_nsp_retrans_conninit; - - cb->rt_flags = DN_RT_F_RQR; - - dn_nsp_send(skb); -} diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c deleted file mode 100644 index ac2ee1689111..000000000000 --- a/net/decnet/dn_route.c +++ /dev/null @@ -1,1922 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * DECnet Routing Functions (Endnode and Router) - * - * Authors: Steve Whitehouse <SteveW@ACM.org> - * Eduardo Marcelo Serrat <emserrat@geocities.com> - * - * Changes: - * Steve Whitehouse : Fixes to allow "intra-ethernet" and - * "return-to-sender" bits on outgoing - * packets. - * Steve Whitehouse : Timeouts for cached routes. - * Steve Whitehouse : Use dst cache for input routes too. - * Steve Whitehouse : Fixed error values in dn_send_skb. - * Steve Whitehouse : Rework routing functions to better fit - * DECnet routing design - * Alexey Kuznetsov : New SMP locking - * Steve Whitehouse : More SMP locking changes & dn_cache_dump() - * Steve Whitehouse : Prerouting NF hook, now really is prerouting. - * Fixed possible skb leak in rtnetlink funcs. - * Steve Whitehouse : Dave Miller's dynamic hash table sizing and - * Alexey Kuznetsov's finer grained locking - * from ipv4/route.c. - * Steve Whitehouse : Routing is now starting to look like a - * sensible set of code now, mainly due to - * my copying the IPv4 routing code. The - * hooks here are modified and will continue - * to evolve for a while. - * Steve Whitehouse : Real SMP at last :-) Also new netfilter - * stuff. Look out raw sockets your days - * are numbered! - * Steve Whitehouse : Added return-to-sender functions. Added - * backlog congestion level return codes. - * Steve Whitehouse : Fixed bug where routes were set up with - * no ref count on net devices. 
- * Steve Whitehouse : RCU for the route cache - * Steve Whitehouse : Preparations for the flow cache - * Steve Whitehouse : Prepare for nonlinear skbs - */ - -/****************************************************************************** - (c) 1995-1998 E.M. Serrat emserrat@geocities.com - -*******************************************************************************/ - -#include <linux/errno.h> -#include <linux/types.h> -#include <linux/socket.h> -#include <linux/in.h> -#include <linux/kernel.h> -#include <linux/sockios.h> -#include <linux/net.h> -#include <linux/netdevice.h> -#include <linux/inet.h> -#include <linux/route.h> -#include <linux/in_route.h> -#include <linux/slab.h> -#include <net/sock.h> -#include <linux/mm.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/rtnetlink.h> -#include <linux/string.h> -#include <linux/netfilter_decnet.h> -#include <linux/rcupdate.h> -#include <linux/times.h> -#include <linux/export.h> -#include <asm/errno.h> -#include <net/net_namespace.h> -#include <net/netlink.h> -#include <net/neighbour.h> -#include <net/dst.h> -#include <net/flow.h> -#include <net/fib_rules.h> -#include <net/dn.h> -#include <net/dn_dev.h> -#include <net/dn_nsp.h> -#include <net/dn_route.h> -#include <net/dn_neigh.h> -#include <net/dn_fib.h> - -struct dn_rt_hash_bucket { - struct dn_route __rcu *chain; - spinlock_t lock; -}; - -extern struct neigh_table dn_neigh_table; - - -static unsigned char dn_hiord_addr[6] = {0xAA, 0x00, 0x04, 0x00, 0x00, 0x00}; - -static const int dn_rt_min_delay = 2 * HZ; -static const int dn_rt_max_delay = 10 * HZ; -static const int dn_rt_mtu_expires = 10 * 60 * HZ; - -static unsigned long dn_rt_deadline; - -static int dn_dst_gc(struct dst_ops *ops); -static struct dst_entry *dn_dst_check(struct dst_entry *, __u32); -static unsigned int dn_dst_default_advmss(const struct dst_entry *dst); -static unsigned int dn_dst_mtu(const struct dst_entry *dst); -static void dn_dst_destroy(struct dst_entry *); -static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how); -static struct dst_entry *dn_dst_negative_advice(struct dst_entry *); -static void dn_dst_link_failure(struct sk_buff *); -static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb , u32 mtu, - bool confirm_neigh); -static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb); -static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, - struct sk_buff *skb, - const void *daddr); -static int dn_route_input(struct sk_buff *); -static void dn_run_flush(struct timer_list *unused); - -static struct dn_rt_hash_bucket *dn_rt_hash_table; -static unsigned int dn_rt_hash_mask; - -static struct timer_list dn_route_timer; -static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush); -int decnet_dst_gc_interval = 2; - -static struct dst_ops dn_dst_ops = { - .family = PF_DECnet, - .gc_thresh = 128, - .gc = dn_dst_gc, - .check = dn_dst_check, - .default_advmss = dn_dst_default_advmss, - .mtu = dn_dst_mtu, - .cow_metrics = dst_cow_metrics_generic, - .destroy = dn_dst_destroy, - .ifdown = dn_dst_ifdown, - .negative_advice = dn_dst_negative_advice, - .link_failure = dn_dst_link_failure, - .update_pmtu = dn_dst_update_pmtu, - .redirect = dn_dst_redirect, - .neigh_lookup = dn_dst_neigh_lookup, -}; - -static void dn_dst_destroy(struct dst_entry *dst) -{ - struct dn_route *rt = (struct dn_route *) dst; - - if (rt->n) - neigh_release(rt->n); - dst_destroy_metrics_generic(dst); 
-} - -static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how) -{ - if (how) { - struct dn_route *rt = (struct dn_route *) dst; - struct neighbour *n = rt->n; - - if (n && n->dev == dev) { - n->dev = blackhole_netdev; - dev_hold(n->dev); - dev_put(dev); - } - } -} - -static __inline__ unsigned int dn_hash(__le16 src, __le16 dst) -{ - __u16 tmp = (__u16 __force)(src ^ dst); - tmp ^= (tmp >> 3); - tmp ^= (tmp >> 5); - tmp ^= (tmp >> 10); - return dn_rt_hash_mask & (unsigned int)tmp; -} - -static void dn_dst_check_expire(struct timer_list *unused) -{ - int i; - struct dn_route *rt; - struct dn_route __rcu **rtp; - unsigned long now = jiffies; - unsigned long expire = 120 * HZ; - - for (i = 0; i <= dn_rt_hash_mask; i++) { - rtp = &dn_rt_hash_table[i].chain; - - spin_lock(&dn_rt_hash_table[i].lock); - while ((rt = rcu_dereference_protected(*rtp, - lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) { - if (atomic_read(&rt->dst.__refcnt) > 1 || - (now - rt->dst.lastuse) < expire) { - rtp = &rt->dn_next; - continue; - } - *rtp = rt->dn_next; - rt->dn_next = NULL; - dst_dev_put(&rt->dst); - dst_release(&rt->dst); - } - spin_unlock(&dn_rt_hash_table[i].lock); - - if (jiffies != now) - break; - } - - mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ); -} - -static int dn_dst_gc(struct dst_ops *ops) -{ - struct dn_route *rt; - struct dn_route __rcu **rtp; - int i; - unsigned long now = jiffies; - unsigned long expire = 10 * HZ; - - for (i = 0; i <= dn_rt_hash_mask; i++) { - - spin_lock_bh(&dn_rt_hash_table[i].lock); - rtp = &dn_rt_hash_table[i].chain; - - while ((rt = rcu_dereference_protected(*rtp, - lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) { - if (atomic_read(&rt->dst.__refcnt) > 1 || - (now - rt->dst.lastuse) < expire) { - rtp = &rt->dn_next; - continue; - } - *rtp = rt->dn_next; - rt->dn_next = NULL; - dst_dev_put(&rt->dst); - dst_release(&rt->dst); - break; - } - spin_unlock_bh(&dn_rt_hash_table[i].lock); - } - - return 0; -} - -/* - * The decnet standards don't impose a particular minimum mtu, what they - * do insist on is that the routing layer accepts a datagram of at least - * 230 bytes long. Here we have to subtract the routing header length from - * 230 to get the minimum acceptable mtu. If there is no neighbour, then we - * assume the worst and use a long header size. - * - * We update both the mtu and the advertised mss (i.e. the segment size we - * advertise to the other end). - */ -static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu, - bool confirm_neigh) -{ - struct dn_route *rt = (struct dn_route *) dst; - struct neighbour *n = rt->n; - u32 min_mtu = 230; - struct dn_dev *dn; - - dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL; - - if (dn && dn->use_long == 0) - min_mtu -= 6; - else - min_mtu -= 21; - - if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) { - if (!(dst_metric_locked(dst, RTAX_MTU))) { - dst_metric_set(dst, RTAX_MTU, mtu); - dst_set_expires(dst, dn_rt_mtu_expires); - } - if (!(dst_metric_locked(dst, RTAX_ADVMSS))) { - u32 mss = mtu - DN_MAX_NSP_DATA_HEADER; - u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS); - if (!existing_mss || existing_mss > mss) - dst_metric_set(dst, RTAX_ADVMSS, mss); - } - } -} - -static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb) -{ -} - -/* - * When a route has been marked obsolete. (e.g. 
routing cache flush) - */ -static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie) -{ - return NULL; -} - -static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst) -{ - dst_release(dst); - return NULL; -} - -static void dn_dst_link_failure(struct sk_buff *skb) -{ -} - -static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2) -{ - return ((fl1->daddr ^ fl2->daddr) | - (fl1->saddr ^ fl2->saddr) | - (fl1->flowidn_mark ^ fl2->flowidn_mark) | - (fl1->flowidn_scope ^ fl2->flowidn_scope) | - (fl1->flowidn_oif ^ fl2->flowidn_oif) | - (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0; -} - -static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp) -{ - struct dn_route *rth; - struct dn_route __rcu **rthp; - unsigned long now = jiffies; - - rthp = &dn_rt_hash_table[hash].chain; - - spin_lock_bh(&dn_rt_hash_table[hash].lock); - while ((rth = rcu_dereference_protected(*rthp, - lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) { - if (compare_keys(&rth->fld, &rt->fld)) { - /* Put it first */ - *rthp = rth->dn_next; - rcu_assign_pointer(rth->dn_next, - dn_rt_hash_table[hash].chain); - rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth); - - dst_hold_and_use(&rth->dst, now); - spin_unlock_bh(&dn_rt_hash_table[hash].lock); - - dst_release_immediate(&rt->dst); - *rp = rth; - return 0; - } - rthp = &rth->dn_next; - } - - rcu_assign_pointer(rt->dn_next, dn_rt_hash_table[hash].chain); - rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt); - - dst_hold_and_use(&rt->dst, now); - spin_unlock_bh(&dn_rt_hash_table[hash].lock); - *rp = rt; - return 0; -} - -static void dn_run_flush(struct timer_list *unused) -{ - int i; - struct dn_route *rt, *next; - - for (i = 0; i < dn_rt_hash_mask; i++) { - spin_lock_bh(&dn_rt_hash_table[i].lock); - - rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL); - if (!rt) - goto nothing_to_declare; - - for (; rt; rt = next) { - next = rcu_dereference_raw(rt->dn_next); - RCU_INIT_POINTER(rt->dn_next, NULL); - dst_dev_put(&rt->dst); - dst_release(&rt->dst); - } - -nothing_to_declare: - spin_unlock_bh(&dn_rt_hash_table[i].lock); - } -} - -static DEFINE_SPINLOCK(dn_rt_flush_lock); - -void dn_rt_cache_flush(int delay) -{ - unsigned long now = jiffies; - int user_mode = !in_interrupt(); - - if (delay < 0) - delay = dn_rt_min_delay; - - spin_lock_bh(&dn_rt_flush_lock); - - if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) { - long tmo = (long)(dn_rt_deadline - now); - - if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay) - tmo = 0; - - if (delay > tmo) - delay = tmo; - } - - if (delay <= 0) { - spin_unlock_bh(&dn_rt_flush_lock); - dn_run_flush(NULL); - return; - } - - if (dn_rt_deadline == 0) - dn_rt_deadline = now + dn_rt_max_delay; - - dn_rt_flush_timer.expires = now + delay; - add_timer(&dn_rt_flush_timer); - spin_unlock_bh(&dn_rt_flush_lock); -} - -/** - * dn_return_short - Return a short packet to its sender - * @skb: The packet to return - * - */ -static int dn_return_short(struct sk_buff *skb) -{ - struct dn_skb_cb *cb; - unsigned char *ptr; - __le16 *src; - __le16 *dst; - - /* Add back headers */ - skb_push(skb, skb->data - skb_network_header(skb)); - - skb = skb_unshare(skb, GFP_ATOMIC); - if (!skb) - return NET_RX_DROP; - - cb = DN_SKB_CB(skb); - /* Skip packet length and point to flags */ - ptr = skb->data + 2; - *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS; - - dst = (__le16 *)ptr; - ptr += 2; - src = (__le16 *)ptr; - ptr += 2; - *ptr = 0; /* Zero hop count 
*/ - - swap(*src, *dst); - - skb->pkt_type = PACKET_OUTGOING; - dn_rt_finish_output(skb, NULL, NULL); - return NET_RX_SUCCESS; -} - -/** - * dn_return_long - Return a long packet to its sender - * @skb: The long format packet to return - * - */ -static int dn_return_long(struct sk_buff *skb) -{ - struct dn_skb_cb *cb; - unsigned char *ptr; - unsigned char *src_addr, *dst_addr; - unsigned char tmp[ETH_ALEN]; - - /* Add back all headers */ - skb_push(skb, skb->data - skb_network_header(skb)); - - skb = skb_unshare(skb, GFP_ATOMIC); - if (!skb) - return NET_RX_DROP; - - cb = DN_SKB_CB(skb); - /* Ignore packet length and point to flags */ - ptr = skb->data + 2; - - /* Skip padding */ - if (*ptr & DN_RT_F_PF) { - char padlen = (*ptr & ~DN_RT_F_PF); - ptr += padlen; - } - - *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS; - ptr += 2; - dst_addr = ptr; - ptr += 8; - src_addr = ptr; - ptr += 6; - *ptr = 0; /* Zero hop count */ - - /* Swap source and destination */ - memcpy(tmp, src_addr, ETH_ALEN); - memcpy(src_addr, dst_addr, ETH_ALEN); - memcpy(dst_addr, tmp, ETH_ALEN); - - skb->pkt_type = PACKET_OUTGOING; - dn_rt_finish_output(skb, dst_addr, src_addr); - return NET_RX_SUCCESS; -} - -/** - * dn_route_rx_packet - Try and find a route for an incoming packet - * @net: The applicable net namespace - * @sk: Socket packet transmitted on - * @skb: The packet to find a route for - * - * Returns: result of input function if route is found, error code otherwise - */ -static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - struct dn_skb_cb *cb; - int err; - - err = dn_route_input(skb); - if (err == 0) - return dst_input(skb); - - cb = DN_SKB_CB(skb); - if (decnet_debug_level & 4) { - char *devname = skb->dev ? skb->dev->name : "???"; - - printk(KERN_DEBUG - "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n", - (int)cb->rt_flags, devname, skb->len, - le16_to_cpu(cb->src), le16_to_cpu(cb->dst), - err, skb->pkt_type); - } - - if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) { - switch (cb->rt_flags & DN_RT_PKT_MSK) { - case DN_RT_PKT_SHORT: - return dn_return_short(skb); - case DN_RT_PKT_LONG: - return dn_return_long(skb); - } - } - - kfree_skb(skb); - return NET_RX_DROP; -} - -static int dn_route_rx_long(struct sk_buff *skb) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - unsigned char *ptr = skb->data; - - if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */ - goto drop_it; - - skb_pull(skb, 20); - skb_reset_transport_header(skb); - - /* Destination info */ - ptr += 2; - cb->dst = dn_eth2dn(ptr); - if (memcmp(ptr, dn_hiord_addr, 4) != 0) - goto drop_it; - ptr += 6; - - - /* Source info */ - ptr += 2; - cb->src = dn_eth2dn(ptr); - if (memcmp(ptr, dn_hiord_addr, 4) != 0) - goto drop_it; - ptr += 6; - /* Other junk */ - ptr++; - cb->hops = *ptr++; /* Visit Count */ - - return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, - &init_net, NULL, skb, skb->dev, NULL, - dn_route_rx_packet); - -drop_it: - kfree_skb(skb); - return NET_RX_DROP; -} - - - -static int dn_route_rx_short(struct sk_buff *skb) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - unsigned char *ptr = skb->data; - - if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */ - goto drop_it; - - skb_pull(skb, 5); - skb_reset_transport_header(skb); - - cb->dst = *(__le16 *)ptr; - ptr += 2; - cb->src = *(__le16 *)ptr; - ptr += 2; - cb->hops = *ptr & 0x3f; - - return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, - &init_net, 
NULL, skb, skb->dev, NULL, - dn_route_rx_packet); - -drop_it: - kfree_skb(skb); - return NET_RX_DROP; -} - -static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - /* - * I know we drop the packet here, but that's considered success in - * this case - */ - kfree_skb(skb); - return NET_RX_SUCCESS; -} - -static int dn_route_ptp_hello(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - dn_dev_hello(skb); - dn_neigh_pointopoint_hello(skb); - return NET_RX_SUCCESS; -} - -int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) -{ - struct dn_skb_cb *cb; - unsigned char flags = 0; - __u16 len = le16_to_cpu(*(__le16 *)skb->data); - struct dn_dev *dn = rcu_dereference(dev->dn_ptr); - unsigned char padlen = 0; - - if (!net_eq(dev_net(dev), &init_net)) - goto dump_it; - - if (dn == NULL) - goto dump_it; - - skb = skb_share_check(skb, GFP_ATOMIC); - if (!skb) - goto out; - - if (!pskb_may_pull(skb, 3)) - goto dump_it; - - skb_pull(skb, 2); - - if (len > skb->len) - goto dump_it; - - skb_trim(skb, len); - - flags = *skb->data; - - cb = DN_SKB_CB(skb); - cb->stamp = jiffies; - cb->iif = dev->ifindex; - - /* - * If we have padding, remove it. - */ - if (flags & DN_RT_F_PF) { - padlen = flags & ~DN_RT_F_PF; - if (!pskb_may_pull(skb, padlen + 1)) - goto dump_it; - skb_pull(skb, padlen); - flags = *skb->data; - } - - skb_reset_network_header(skb); - - /* - * Weed out future version DECnet - */ - if (flags & DN_RT_F_VER) - goto dump_it; - - cb->rt_flags = flags; - - if (decnet_debug_level & 1) - printk(KERN_DEBUG - "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n", - (int)flags, dev->name, len, skb->len, - padlen); - - if (flags & DN_RT_PKT_CNTL) { - if (unlikely(skb_linearize(skb))) - goto dump_it; - - switch (flags & DN_RT_CNTL_MSK) { - case DN_RT_PKT_INIT: - dn_dev_init_pkt(skb); - break; - case DN_RT_PKT_VERI: - dn_dev_veri_pkt(skb); - break; - } - - if (dn->parms.state != DN_DEV_S_RU) - goto dump_it; - - switch (flags & DN_RT_CNTL_MSK) { - case DN_RT_PKT_HELO: - return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, - &init_net, NULL, skb, skb->dev, NULL, - dn_route_ptp_hello); - - case DN_RT_PKT_L1RT: - case DN_RT_PKT_L2RT: - return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE, - &init_net, NULL, skb, skb->dev, NULL, - dn_route_discard); - case DN_RT_PKT_ERTH: - return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, - &init_net, NULL, skb, skb->dev, NULL, - dn_neigh_router_hello); - - case DN_RT_PKT_EEDH: - return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, - &init_net, NULL, skb, skb->dev, NULL, - dn_neigh_endnode_hello); - } - } else { - if (dn->parms.state != DN_DEV_S_RU) - goto dump_it; - - skb_pull(skb, 1); /* Pull flags */ - - switch (flags & DN_RT_PKT_MSK) { - case DN_RT_PKT_LONG: - return dn_route_rx_long(skb); - case DN_RT_PKT_SHORT: - return dn_route_rx_short(skb); - } - } - -dump_it: - kfree_skb(skb); -out: - return NET_RX_DROP; -} - -static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - struct dn_route *rt = (struct dn_route *)dst; - struct net_device *dev = dst->dev; - struct dn_skb_cb *cb = DN_SKB_CB(skb); - - int err = -EINVAL; - - if (rt->n == NULL) - goto error; - - skb->dev = dev; - - cb->src = rt->rt_saddr; - cb->dst = rt->rt_daddr; - - /* - * Always set the Intra-Ethernet bit on all outgoing packets - * originated on this node. Only valid flag from upper layers - * is return-to-sender-requested. Set hop count to 0 too. 
- */ - cb->rt_flags &= ~DN_RT_F_RQR; - cb->rt_flags |= DN_RT_F_IE; - cb->hops = 0; - - return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, - &init_net, sk, skb, NULL, dev, - dn_to_neigh_output); - -error: - net_dbg_ratelimited("dn_output: This should not happen\n"); - - kfree_skb(skb); - - return err; -} - -static int dn_forward(struct sk_buff *skb) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - struct dst_entry *dst = skb_dst(skb); - struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr); - struct dn_route *rt; - int header_len; - struct net_device *dev = skb->dev; - - if (skb->pkt_type != PACKET_HOST) - goto drop; - - /* Ensure that we have enough space for headers */ - rt = (struct dn_route *)skb_dst(skb); - header_len = dn_db->use_long ? 21 : 6; - if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len)) - goto drop; - - /* - * Hop count exceeded. - */ - if (++cb->hops > 30) - goto drop; - - skb->dev = rt->dst.dev; - - /* - * If packet goes out same interface it came in on, then set - * the Intra-Ethernet bit. This has no effect for short - * packets, so we don't need to test for them here. - */ - cb->rt_flags &= ~DN_RT_F_IE; - if (rt->rt_flags & RTCF_DOREDIRECT) - cb->rt_flags |= DN_RT_F_IE; - - return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, - &init_net, NULL, skb, dev, skb->dev, - dn_to_neigh_output); - -drop: - kfree_skb(skb); - return NET_RX_DROP; -} - -/* - * Used to catch bugs. This should never normally get - * called. - */ -static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - - net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n", - le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); - - kfree_skb(skb); - - return NET_RX_DROP; -} - -static int dn_rt_bug(struct sk_buff *skb) -{ - struct dn_skb_cb *cb = DN_SKB_CB(skb); - - net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n", - le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); - - kfree_skb(skb); - - return NET_RX_DROP; -} - -static unsigned int dn_dst_default_advmss(const struct dst_entry *dst) -{ - return dn_mss_from_pmtu(dst->dev, dst_mtu(dst)); -} - -static unsigned int dn_dst_mtu(const struct dst_entry *dst) -{ - unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); - - return mtu ? 
: dst->dev->mtu; -} - -static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, - struct sk_buff *skb, - const void *daddr) -{ - return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev); -} - -static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) -{ - struct dn_fib_info *fi = res->fi; - struct net_device *dev = rt->dst.dev; - unsigned int mss_metric; - struct neighbour *n; - - if (fi) { - if (DN_FIB_RES_GW(*res) && - DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) - rt->rt_gateway = DN_FIB_RES_GW(*res); - dst_init_metrics(&rt->dst, fi->fib_metrics, true); - } - rt->rt_type = res->type; - - if (dev != NULL && rt->n == NULL) { - n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); - if (IS_ERR(n)) - return PTR_ERR(n); - rt->n = n; - } - - if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) - dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu); - mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS); - if (mss_metric) { - unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst)); - if (mss_metric > mss) - dst_metric_set(&rt->dst, RTAX_ADVMSS, mss); - } - return 0; -} - -static inline int dn_match_addr(__le16 addr1, __le16 addr2) -{ - __u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2); - int match = 16; - while (tmp) { - tmp >>= 1; - match--; - } - return match; -} - -static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope) -{ - __le16 saddr = 0; - struct dn_dev *dn_db; - struct dn_ifaddr *ifa; - int best_match = 0; - int ret; - - rcu_read_lock(); - dn_db = rcu_dereference(dev->dn_ptr); - for (ifa = rcu_dereference(dn_db->ifa_list); - ifa != NULL; - ifa = rcu_dereference(ifa->ifa_next)) { - if (ifa->ifa_scope > scope) - continue; - if (!daddr) { - saddr = ifa->ifa_local; - break; - } - ret = dn_match_addr(daddr, ifa->ifa_local); - if (ret > best_match) - saddr = ifa->ifa_local; - if (best_match == 0) - saddr = ifa->ifa_local; - } - rcu_read_unlock(); - - return saddr; -} - -static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res) -{ - return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope); -} - -static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res) -{ - __le16 mask = dnet_make_mask(res->prefixlen); - return (daddr&~mask)|res->fi->fib_nh->nh_gw; -} - -static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard) -{ - struct flowidn fld = { - .daddr = oldflp->daddr, - .saddr = oldflp->saddr, - .flowidn_scope = RT_SCOPE_UNIVERSE, - .flowidn_mark = oldflp->flowidn_mark, - .flowidn_iif = LOOPBACK_IFINDEX, - .flowidn_oif = oldflp->flowidn_oif, - }; - struct dn_route *rt = NULL; - struct net_device *dev_out = NULL, *dev; - struct neighbour *neigh = NULL; - unsigned int hash; - unsigned int flags = 0; - struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST }; - int err; - int free_res = 0; - __le16 gateway = 0; - - if (decnet_debug_level & 16) - printk(KERN_DEBUG - "dn_route_output_slow: dst=%04x src=%04x mark=%d" - " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr), - le16_to_cpu(oldflp->saddr), - oldflp->flowidn_mark, LOOPBACK_IFINDEX, - oldflp->flowidn_oif); - - /* If we have an output interface, verify its a DECnet device */ - if (oldflp->flowidn_oif) { - dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif); - err = -ENODEV; - if (dev_out && dev_out->dn_ptr == NULL) { - dev_put(dev_out); - dev_out = NULL; - } - if (dev_out == NULL) - goto out; - } - - /* If we have a source address, verify that its 
a local address */ - if (oldflp->saddr) { - err = -EADDRNOTAVAIL; - - if (dev_out) { - if (dn_dev_islocal(dev_out, oldflp->saddr)) - goto source_ok; - dev_put(dev_out); - goto out; - } - rcu_read_lock(); - for_each_netdev_rcu(&init_net, dev) { - if (!dev->dn_ptr) - continue; - if (!dn_dev_islocal(dev, oldflp->saddr)) - continue; - if ((dev->flags & IFF_LOOPBACK) && - oldflp->daddr && - !dn_dev_islocal(dev, oldflp->daddr)) - continue; - - dev_out = dev; - break; - } - rcu_read_unlock(); - if (dev_out == NULL) - goto out; - dev_hold(dev_out); -source_ok: - ; - } - - /* No destination? Assume its local */ - if (!fld.daddr) { - fld.daddr = fld.saddr; - - dev_put(dev_out); - err = -EINVAL; - dev_out = init_net.loopback_dev; - if (!dev_out->dn_ptr) - goto out; - err = -EADDRNOTAVAIL; - dev_hold(dev_out); - if (!fld.daddr) { - fld.daddr = - fld.saddr = dnet_select_source(dev_out, 0, - RT_SCOPE_HOST); - if (!fld.daddr) - goto done; - } - fld.flowidn_oif = LOOPBACK_IFINDEX; - res.type = RTN_LOCAL; - goto make_route; - } - - if (decnet_debug_level & 16) - printk(KERN_DEBUG - "dn_route_output_slow: initial checks complete." - " dst=%04x src=%04x oif=%d try_hard=%d\n", - le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr), - fld.flowidn_oif, try_hard); - - /* - * N.B. If the kernel is compiled without router support then - * dn_fib_lookup() will evaluate to non-zero so this if () block - * will always be executed. - */ - err = -ESRCH; - if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) { - struct dn_dev *dn_db; - if (err != -ESRCH) - goto out; - /* - * Here the fallback is basically the standard algorithm for - * routing in endnodes which is described in the DECnet routing - * docs - * - * If we are not trying hard, look in neighbour cache. - * The result is tested to ensure that if a specific output - * device/source address was requested, then we honour that - * here - */ - if (!try_hard) { - neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr); - if (neigh) { - if ((oldflp->flowidn_oif && - (neigh->dev->ifindex != oldflp->flowidn_oif)) || - (oldflp->saddr && - (!dn_dev_islocal(neigh->dev, - oldflp->saddr)))) { - neigh_release(neigh); - neigh = NULL; - } else { - dev_put(dev_out); - if (dn_dev_islocal(neigh->dev, fld.daddr)) { - dev_out = init_net.loopback_dev; - res.type = RTN_LOCAL; - } else { - dev_out = neigh->dev; - } - dev_hold(dev_out); - goto select_source; - } - } - } - - /* Not there? Perhaps its a local address */ - if (dev_out == NULL) - dev_out = dn_dev_get_default(); - err = -ENODEV; - if (dev_out == NULL) - goto out; - dn_db = rcu_dereference_raw(dev_out->dn_ptr); - if (!dn_db) - goto e_inval; - /* Possible improvement - check all devices for local addr */ - if (dn_dev_islocal(dev_out, fld.daddr)) { - dev_put(dev_out); - dev_out = init_net.loopback_dev; - dev_hold(dev_out); - res.type = RTN_LOCAL; - goto select_source; - } - /* Not local either.... try sending it to the default router */ - neigh = neigh_clone(dn_db->router); - BUG_ON(neigh && neigh->dev != dev_out); - - /* Ok then, we assume its directly connected and move on */ -select_source: - if (neigh) - gateway = container_of(neigh, struct dn_neigh, n)->addr; - if (gateway == 0) - gateway = fld.daddr; - if (fld.saddr == 0) { - fld.saddr = dnet_select_source(dev_out, gateway, - res.type == RTN_LOCAL ? 
- RT_SCOPE_HOST : - RT_SCOPE_LINK); - if (fld.saddr == 0 && res.type != RTN_LOCAL) - goto e_addr; - } - fld.flowidn_oif = dev_out->ifindex; - goto make_route; - } - free_res = 1; - - if (res.type == RTN_NAT) - goto e_inval; - - if (res.type == RTN_LOCAL) { - if (!fld.saddr) - fld.saddr = fld.daddr; - dev_put(dev_out); - dev_out = init_net.loopback_dev; - dev_hold(dev_out); - if (!dev_out->dn_ptr) - goto e_inval; - fld.flowidn_oif = dev_out->ifindex; - if (res.fi) - dn_fib_info_put(res.fi); - res.fi = NULL; - goto make_route; - } - - if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0) - dn_fib_select_multipath(&fld, &res); - - /* - * We could add some logic to deal with default routes here and - * get rid of some of the special casing above. - */ - - if (!fld.saddr) - fld.saddr = DN_FIB_RES_PREFSRC(res); - - dev_put(dev_out); - dev_out = DN_FIB_RES_DEV(res); - dev_hold(dev_out); - fld.flowidn_oif = dev_out->ifindex; - gateway = DN_FIB_RES_GW(res); - -make_route: - if (dev_out->flags & IFF_LOOPBACK) - flags |= RTCF_LOCAL; - - rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, 0); - if (rt == NULL) - goto e_nobufs; - - rt->dn_next = NULL; - memset(&rt->fld, 0, sizeof(rt->fld)); - rt->fld.saddr = oldflp->saddr; - rt->fld.daddr = oldflp->daddr; - rt->fld.flowidn_oif = oldflp->flowidn_oif; - rt->fld.flowidn_iif = 0; - rt->fld.flowidn_mark = oldflp->flowidn_mark; - - rt->rt_saddr = fld.saddr; - rt->rt_daddr = fld.daddr; - rt->rt_gateway = gateway ? gateway : fld.daddr; - rt->rt_local_src = fld.saddr; - - rt->rt_dst_map = fld.daddr; - rt->rt_src_map = fld.saddr; - - rt->n = neigh; - neigh = NULL; - - rt->dst.lastuse = jiffies; - rt->dst.output = dn_output; - rt->dst.input = dn_rt_bug; - rt->rt_flags = flags; - if (flags & RTCF_LOCAL) - rt->dst.input = dn_nsp_rx; - - err = dn_rt_set_next_hop(rt, &res); - if (err) - goto e_neighbour; - - hash = dn_hash(rt->fld.saddr, rt->fld.daddr); - /* dn_insert_route() increments dst->__refcnt */ - dn_insert_route(rt, hash, (struct dn_route **)pprt); - -done: - if (neigh) - neigh_release(neigh); - if (free_res) - dn_fib_res_put(&res); - dev_put(dev_out); -out: - return err; - -e_addr: - err = -EADDRNOTAVAIL; - goto done; -e_inval: - err = -EINVAL; - goto done; -e_nobufs: - err = -ENOBUFS; - goto done; -e_neighbour: - dst_release_immediate(&rt->dst); - goto e_nobufs; -} - - -/* - * N.B. The flags may be moved into the flowi at some future stage. 
- */ -static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags) -{ - unsigned int hash = dn_hash(flp->saddr, flp->daddr); - struct dn_route *rt = NULL; - - if (!(flags & MSG_TRYHARD)) { - rcu_read_lock_bh(); - for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt; - rt = rcu_dereference_bh(rt->dn_next)) { - if ((flp->daddr == rt->fld.daddr) && - (flp->saddr == rt->fld.saddr) && - (flp->flowidn_mark == rt->fld.flowidn_mark) && - dn_is_output_route(rt) && - (rt->fld.flowidn_oif == flp->flowidn_oif)) { - dst_hold_and_use(&rt->dst, jiffies); - rcu_read_unlock_bh(); - *pprt = &rt->dst; - return 0; - } - } - rcu_read_unlock_bh(); - } - - return dn_route_output_slow(pprt, flp, flags); -} - -static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags) -{ - int err; - - err = __dn_route_output_key(pprt, flp, flags); - if (err == 0 && flp->flowidn_proto) { - *pprt = xfrm_lookup(&init_net, *pprt, - flowidn_to_flowi(flp), NULL, 0); - if (IS_ERR(*pprt)) { - err = PTR_ERR(*pprt); - *pprt = NULL; - } - } - return err; -} - -int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int flags) -{ - int err; - - err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD); - if (err == 0 && fl->flowidn_proto) { - *pprt = xfrm_lookup(&init_net, *pprt, - flowidn_to_flowi(fl), sk, 0); - if (IS_ERR(*pprt)) { - err = PTR_ERR(*pprt); - *pprt = NULL; - } - } - return err; -} - -static int dn_route_input_slow(struct sk_buff *skb) -{ - struct dn_route *rt = NULL; - struct dn_skb_cb *cb = DN_SKB_CB(skb); - struct net_device *in_dev = skb->dev; - struct net_device *out_dev = NULL; - struct dn_dev *dn_db; - struct neighbour *neigh = NULL; - unsigned int hash; - int flags = 0; - __le16 gateway = 0; - __le16 local_src = 0; - struct flowidn fld = { - .daddr = cb->dst, - .saddr = cb->src, - .flowidn_scope = RT_SCOPE_UNIVERSE, - .flowidn_mark = skb->mark, - .flowidn_iif = skb->dev->ifindex, - }; - struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE }; - int err = -EINVAL; - int free_res = 0; - - dev_hold(in_dev); - - dn_db = rcu_dereference(in_dev->dn_ptr); - if (!dn_db) - goto out; - - /* Zero source addresses are not allowed */ - if (fld.saddr == 0) - goto out; - - /* - * In this case we've just received a packet from a source - * outside ourselves pretending to come from us. We don't - * allow it any further to prevent routing loops, spoofing and - * other nasties. Loopback packets already have the dst attached - * so this only affects packets which have originated elsewhere. - */ - err = -ENOTUNIQ; - if (dn_dev_islocal(in_dev, cb->src)) - goto out; - - err = dn_fib_lookup(&fld, &res); - if (err) { - if (err != -ESRCH) - goto out; - /* - * Is the destination us ? 
- */ - if (!dn_dev_islocal(in_dev, cb->dst)) - goto e_inval; - - res.type = RTN_LOCAL; - } else { - __le16 src_map = fld.saddr; - free_res = 1; - - out_dev = DN_FIB_RES_DEV(res); - if (out_dev == NULL) { - net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n"); - goto e_inval; - } - dev_hold(out_dev); - - if (res.r) - src_map = fld.saddr; /* no NAT support for now */ - - gateway = DN_FIB_RES_GW(res); - if (res.type == RTN_NAT) { - fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res); - dn_fib_res_put(&res); - free_res = 0; - if (dn_fib_lookup(&fld, &res)) - goto e_inval; - free_res = 1; - if (res.type != RTN_UNICAST) - goto e_inval; - flags |= RTCF_DNAT; - gateway = fld.daddr; - } - fld.saddr = src_map; - } - - switch (res.type) { - case RTN_UNICAST: - /* - * Forwarding check here, we only check for forwarding - * being turned off, if you want to only forward intra - * area, its up to you to set the routing tables up - * correctly. - */ - if (dn_db->parms.forwarding == 0) - goto e_inval; - - if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0) - dn_fib_select_multipath(&fld, &res); - - /* - * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT - * flag as a hint to set the intra-ethernet bit when - * forwarding. If we've got NAT in operation, we don't do - * this optimisation. - */ - if (out_dev == in_dev && !(flags & RTCF_NAT)) - flags |= RTCF_DOREDIRECT; - - local_src = DN_FIB_RES_PREFSRC(res); - break; - case RTN_BLACKHOLE: - case RTN_UNREACHABLE: - break; - case RTN_LOCAL: - flags |= RTCF_LOCAL; - fld.saddr = cb->dst; - fld.daddr = cb->src; - - /* Routing tables gave us a gateway */ - if (gateway) - goto make_route; - - /* Packet was intra-ethernet, so we know its on-link */ - if (cb->rt_flags & DN_RT_F_IE) { - gateway = cb->src; - goto make_route; - } - - /* Use the default router if there is one */ - neigh = neigh_clone(dn_db->router); - if (neigh) { - gateway = container_of(neigh, struct dn_neigh, n)->addr; - goto make_route; - } - - /* Close eyes and pray */ - gateway = cb->src; - goto make_route; - default: - goto e_inval; - } - -make_route: - rt = dst_alloc(&dn_dst_ops, out_dev, 1, DST_OBSOLETE_NONE, 0); - if (rt == NULL) - goto e_nobufs; - - rt->dn_next = NULL; - memset(&rt->fld, 0, sizeof(rt->fld)); - rt->rt_saddr = fld.saddr; - rt->rt_daddr = fld.daddr; - rt->rt_gateway = fld.daddr; - if (gateway) - rt->rt_gateway = gateway; - rt->rt_local_src = local_src ? 
local_src : rt->rt_saddr; - - rt->rt_dst_map = fld.daddr; - rt->rt_src_map = fld.saddr; - - rt->fld.saddr = cb->src; - rt->fld.daddr = cb->dst; - rt->fld.flowidn_oif = 0; - rt->fld.flowidn_iif = in_dev->ifindex; - rt->fld.flowidn_mark = fld.flowidn_mark; - - rt->n = neigh; - rt->dst.lastuse = jiffies; - rt->dst.output = dn_rt_bug_out; - switch (res.type) { - case RTN_UNICAST: - rt->dst.input = dn_forward; - break; - case RTN_LOCAL: - rt->dst.output = dn_output; - rt->dst.input = dn_nsp_rx; - rt->dst.dev = in_dev; - flags |= RTCF_LOCAL; - break; - default: - case RTN_UNREACHABLE: - case RTN_BLACKHOLE: - rt->dst.input = dst_discard; - } - rt->rt_flags = flags; - - err = dn_rt_set_next_hop(rt, &res); - if (err) - goto e_neighbour; - - hash = dn_hash(rt->fld.saddr, rt->fld.daddr); - /* dn_insert_route() increments dst->__refcnt */ - dn_insert_route(rt, hash, &rt); - skb_dst_set(skb, &rt->dst); - -done: - if (neigh) - neigh_release(neigh); - if (free_res) - dn_fib_res_put(&res); - dev_put(in_dev); - dev_put(out_dev); -out: - return err; - -e_inval: - err = -EINVAL; - goto done; - -e_nobufs: - err = -ENOBUFS; - goto done; - -e_neighbour: - dst_release_immediate(&rt->dst); - goto done; -} - -static int dn_route_input(struct sk_buff *skb) -{ - struct dn_route *rt; - struct dn_skb_cb *cb = DN_SKB_CB(skb); - unsigned int hash = dn_hash(cb->src, cb->dst); - - if (skb_dst(skb)) - return 0; - - rcu_read_lock(); - for (rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL; - rt = rcu_dereference(rt->dn_next)) { - if ((rt->fld.saddr == cb->src) && - (rt->fld.daddr == cb->dst) && - (rt->fld.flowidn_oif == 0) && - (rt->fld.flowidn_mark == skb->mark) && - (rt->fld.flowidn_iif == cb->iif)) { - dst_hold_and_use(&rt->dst, jiffies); - rcu_read_unlock(); - skb_dst_set(skb, (struct dst_entry *)rt); - return 0; - } - } - rcu_read_unlock(); - - return dn_route_input_slow(skb); -} - -static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq, - int event, int nowait, unsigned int flags) -{ - struct dn_route *rt = (struct dn_route *)skb_dst(skb); - struct rtmsg *r; - struct nlmsghdr *nlh; - long expires; - - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); - if (!nlh) - return -EMSGSIZE; - - r = nlmsg_data(nlh); - r->rtm_family = AF_DECnet; - r->rtm_dst_len = 16; - r->rtm_src_len = 0; - r->rtm_tos = 0; - r->rtm_table = RT_TABLE_MAIN; - r->rtm_type = rt->rt_type; - r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; - r->rtm_scope = RT_SCOPE_UNIVERSE; - r->rtm_protocol = RTPROT_UNSPEC; - - if (rt->rt_flags & RTCF_NOTIFY) - r->rtm_flags |= RTM_F_NOTIFY; - - if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 || - nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0) - goto errout; - - if (rt->fld.saddr) { - r->rtm_src_len = 16; - if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0) - goto errout; - } - if (rt->dst.dev && - nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0) - goto errout; - - /* - * Note to self - change this if input routes reverse direction when - * they deal only with inputs and not with replies like they do - * currently. - */ - if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0) - goto errout; - - if (rt->rt_daddr != rt->rt_gateway && - nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0) - goto errout; - - if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) - goto errout; - - expires = rt->dst.expires ? 
rt->dst.expires - jiffies : 0; - if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, - rt->dst.error) < 0) - goto errout; - - if (dn_is_input_route(rt) && - nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0) - goto errout; - - nlmsg_end(skb, nlh); - return 0; - -errout: - nlmsg_cancel(skb, nlh); - return -EMSGSIZE; -} - -const struct nla_policy rtm_dn_policy[RTA_MAX + 1] = { - [RTA_DST] = { .type = NLA_U16 }, - [RTA_SRC] = { .type = NLA_U16 }, - [RTA_IIF] = { .type = NLA_U32 }, - [RTA_OIF] = { .type = NLA_U32 }, - [RTA_GATEWAY] = { .type = NLA_U16 }, - [RTA_PRIORITY] = { .type = NLA_U32 }, - [RTA_PREFSRC] = { .type = NLA_U16 }, - [RTA_METRICS] = { .type = NLA_NESTED }, - [RTA_MULTIPATH] = { .type = NLA_NESTED }, - [RTA_TABLE] = { .type = NLA_U32 }, - [RTA_MARK] = { .type = NLA_U32 }, -}; - -/* - * This is called by both endnodes and routers now. - */ -static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack) -{ - struct net *net = sock_net(in_skb->sk); - struct rtmsg *rtm = nlmsg_data(nlh); - struct dn_route *rt = NULL; - struct dn_skb_cb *cb; - int err; - struct sk_buff *skb; - struct flowidn fld; - struct nlattr *tb[RTA_MAX+1]; - - if (!net_eq(net, &init_net)) - return -EINVAL; - - err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, - rtm_dn_policy, extack); - if (err < 0) - return err; - - memset(&fld, 0, sizeof(fld)); - fld.flowidn_proto = DNPROTO_NSP; - - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (skb == NULL) - return -ENOBUFS; - skb_reset_mac_header(skb); - cb = DN_SKB_CB(skb); - - if (tb[RTA_SRC]) - fld.saddr = nla_get_le16(tb[RTA_SRC]); - - if (tb[RTA_DST]) - fld.daddr = nla_get_le16(tb[RTA_DST]); - - if (tb[RTA_IIF]) - fld.flowidn_iif = nla_get_u32(tb[RTA_IIF]); - - if (fld.flowidn_iif) { - struct net_device *dev; - dev = __dev_get_by_index(&init_net, fld.flowidn_iif); - if (!dev || !dev->dn_ptr) { - kfree_skb(skb); - return -ENODEV; - } - skb->protocol = htons(ETH_P_DNA_RT); - skb->dev = dev; - cb->src = fld.saddr; - cb->dst = fld.daddr; - local_bh_disable(); - err = dn_route_input(skb); - local_bh_enable(); - memset(cb, 0, sizeof(struct dn_skb_cb)); - rt = (struct dn_route *)skb_dst(skb); - if (!err && -rt->dst.error) - err = rt->dst.error; - } else { - if (tb[RTA_OIF]) - fld.flowidn_oif = nla_get_u32(tb[RTA_OIF]); - - err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0); - } - - skb->dev = NULL; - if (err) - goto out_free; - skb_dst_set(skb, &rt->dst); - if (rtm->rtm_flags & RTM_F_NOTIFY) - rt->rt_flags |= RTCF_NOTIFY; - - err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0); - if (err < 0) { - err = -EMSGSIZE; - goto out_free; - } - - return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid); - -out_free: - kfree_skb(skb); - return err; -} - -/* - * For routers, this is called from dn_fib_dump, but for endnodes its - * called directly from the rtnetlink dispatch table. 
- */ -int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct net *net = sock_net(skb->sk); - struct dn_route *rt; - int h, s_h; - int idx, s_idx; - struct rtmsg *rtm; - - if (!net_eq(net, &init_net)) - return 0; - - if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg)) - return -EINVAL; - - rtm = nlmsg_data(cb->nlh); - if (!(rtm->rtm_flags & RTM_F_CLONED)) - return 0; - - s_h = cb->args[0]; - s_idx = idx = cb->args[1]; - for (h = 0; h <= dn_rt_hash_mask; h++) { - if (h < s_h) - continue; - if (h > s_h) - s_idx = 0; - rcu_read_lock_bh(); - for (rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0; - rt; - rt = rcu_dereference_bh(rt->dn_next), idx++) { - if (idx < s_idx) - continue; - skb_dst_set(skb, dst_clone(&rt->dst)); - if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, RTM_NEWROUTE, - 1, NLM_F_MULTI) < 0) { - skb_dst_drop(skb); - rcu_read_unlock_bh(); - goto done; - } - skb_dst_drop(skb); - } - rcu_read_unlock_bh(); - } - -done: - cb->args[0] = h; - cb->args[1] = idx; - return skb->len; -} - -#ifdef CONFIG_PROC_FS -struct dn_rt_cache_iter_state { - int bucket; -}; - -static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq) -{ - struct dn_route *rt = NULL; - struct dn_rt_cache_iter_state *s = seq->private; - - for (s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) { - rcu_read_lock_bh(); - rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); - if (rt) - break; - rcu_read_unlock_bh(); - } - return rt; -} - -static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt) -{ - struct dn_rt_cache_iter_state *s = seq->private; - - rt = rcu_dereference_bh(rt->dn_next); - while (!rt) { - rcu_read_unlock_bh(); - if (--s->bucket < 0) - break; - rcu_read_lock_bh(); - rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); - } - return rt; -} - -static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) -{ - struct dn_route *rt = dn_rt_cache_get_first(seq); - - if (rt) { - while (*pos && (rt = dn_rt_cache_get_next(seq, rt))) - --*pos; - } - return *pos ? NULL : rt; -} - -static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - struct dn_route *rt = dn_rt_cache_get_next(seq, v); - ++*pos; - return rt; -} - -static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v) -{ - if (v) - rcu_read_unlock_bh(); -} - -static int dn_rt_cache_seq_show(struct seq_file *seq, void *v) -{ - struct dn_route *rt = v; - char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN]; - - seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n", - rt->dst.dev ? 
rt->dst.dev->name : "*", - dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1), - dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2), - atomic_read(&rt->dst.__refcnt), - rt->dst.__use, 0); - return 0; -} - -static const struct seq_operations dn_rt_cache_seq_ops = { - .start = dn_rt_cache_seq_start, - .next = dn_rt_cache_seq_next, - .stop = dn_rt_cache_seq_stop, - .show = dn_rt_cache_seq_show, -}; -#endif /* CONFIG_PROC_FS */ - -void __init dn_route_init(void) -{ - int i, goal, order; - - dn_dst_ops.kmem_cachep = - kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); - dst_entries_init(&dn_dst_ops); - timer_setup(&dn_route_timer, dn_dst_check_expire, 0); - dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ; - add_timer(&dn_route_timer); - - goal = totalram_pages() >> (26 - PAGE_SHIFT); - - for (order = 0; (1UL << order) < goal; order++) - /* NOTHING */; - - /* - * Only want 1024 entries max, since the table is very, very unlikely - * to be larger than that. - */ - while (order && ((((1UL << order) * PAGE_SIZE) / - sizeof(struct dn_rt_hash_bucket)) >= 2048)) - order--; - - do { - dn_rt_hash_mask = (1UL << order) * PAGE_SIZE / - sizeof(struct dn_rt_hash_bucket); - while (dn_rt_hash_mask & (dn_rt_hash_mask - 1)) - dn_rt_hash_mask--; - dn_rt_hash_table = (struct dn_rt_hash_bucket *) - __get_free_pages(GFP_ATOMIC, order); - } while (dn_rt_hash_table == NULL && --order > 0); - - if (!dn_rt_hash_table) - panic("Failed to allocate DECnet route cache hash table\n"); - - printk(KERN_INFO - "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n", - dn_rt_hash_mask, - (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024); - - dn_rt_hash_mask--; - for (i = 0; i <= dn_rt_hash_mask; i++) { - spin_lock_init(&dn_rt_hash_table[i].lock); - dn_rt_hash_table[i].chain = NULL; - } - - dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1); - - proc_create_seq_private("decnet_cache", 0444, init_net.proc_net, - &dn_rt_cache_seq_ops, - sizeof(struct dn_rt_cache_iter_state), NULL); - -#ifdef CONFIG_DECNET_ROUTER - rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE, - dn_cache_getroute, dn_fib_dump, 0); -#else - rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE, - dn_cache_getroute, dn_cache_dump, 0); -#endif -} - -void __exit dn_route_cleanup(void) -{ - del_timer(&dn_route_timer); - dn_run_flush(NULL); - - remove_proc_entry("decnet_cache", init_net.proc_net); - dst_entries_destroy(&dn_dst_ops); -} diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c deleted file mode 100644 index ee73057529cf..000000000000 --- a/net/decnet/dn_rules.c +++ /dev/null @@ -1,253 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. 
- * - * DECnet Routing Forwarding Information Base (Rules) - * - * Author: Steve Whitehouse <SteveW@ACM.org> - * Mostly copied from Alexey Kuznetsov's ipv4/fib_rules.c - * - * - * Changes: - * Steve Whitehouse <steve@chygwyn.com> - * Updated for Thomas Graf's generic rules - * - */ -#include <linux/net.h> -#include <linux/init.h> -#include <linux/netlink.h> -#include <linux/rtnetlink.h> -#include <linux/netdevice.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/rcupdate.h> -#include <linux/export.h> -#include <net/neighbour.h> -#include <net/dst.h> -#include <net/flow.h> -#include <net/fib_rules.h> -#include <net/dn.h> -#include <net/dn_fib.h> -#include <net/dn_neigh.h> -#include <net/dn_dev.h> -#include <net/dn_route.h> - -static struct fib_rules_ops *dn_fib_rules_ops; - -struct dn_fib_rule -{ - struct fib_rule common; - unsigned char dst_len; - unsigned char src_len; - __le16 src; - __le16 srcmask; - __le16 dst; - __le16 dstmask; - __le16 srcmap; - u8 flags; -}; - - -int dn_fib_lookup(struct flowidn *flp, struct dn_fib_res *res) -{ - struct fib_lookup_arg arg = { - .result = res, - }; - int err; - - err = fib_rules_lookup(dn_fib_rules_ops, - flowidn_to_flowi(flp), 0, &arg); - res->r = arg.rule; - - return err; -} - -static int dn_fib_rule_action(struct fib_rule *rule, struct flowi *flp, - int flags, struct fib_lookup_arg *arg) -{ - struct flowidn *fld = &flp->u.dn; - int err = -EAGAIN; - struct dn_fib_table *tbl; - - switch(rule->action) { - case FR_ACT_TO_TBL: - break; - - case FR_ACT_UNREACHABLE: - err = -ENETUNREACH; - goto errout; - - case FR_ACT_PROHIBIT: - err = -EACCES; - goto errout; - - case FR_ACT_BLACKHOLE: - default: - err = -EINVAL; - goto errout; - } - - tbl = dn_fib_get_table(rule->table, 0); - if (tbl == NULL) - goto errout; - - err = tbl->lookup(tbl, fld, (struct dn_fib_res *)arg->result); - if (err > 0) - err = -EAGAIN; -errout: - return err; -} - -static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) -{ - struct dn_fib_rule *r = (struct dn_fib_rule *)rule; - struct flowidn *fld = &fl->u.dn; - __le16 daddr = fld->daddr; - __le16 saddr = fld->saddr; - - if (((saddr ^ r->src) & r->srcmask) || - ((daddr ^ r->dst) & r->dstmask)) - return 0; - - return 1; -} - -static int dn_fib_rule_configure(struct fib_rule *rule, struct sk_buff *skb, - struct fib_rule_hdr *frh, - struct nlattr **tb, - struct netlink_ext_ack *extack) -{ - int err = -EINVAL; - struct dn_fib_rule *r = (struct dn_fib_rule *)rule; - - if (frh->tos) { - NL_SET_ERR_MSG(extack, "Invalid tos value"); - goto errout; - } - - if (rule->table == RT_TABLE_UNSPEC) { - if (rule->action == FR_ACT_TO_TBL) { - struct dn_fib_table *table; - - table = dn_fib_empty_table(); - if (table == NULL) { - err = -ENOBUFS; - goto errout; - } - - rule->table = table->n; - } - } - - if (frh->src_len) - r->src = nla_get_le16(tb[FRA_SRC]); - - if (frh->dst_len) - r->dst = nla_get_le16(tb[FRA_DST]); - - r->src_len = frh->src_len; - r->srcmask = dnet_make_mask(r->src_len); - r->dst_len = frh->dst_len; - r->dstmask = dnet_make_mask(r->dst_len); - err = 0; -errout: - return err; -} - -static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, - struct nlattr **tb) -{ - struct dn_fib_rule *r = (struct dn_fib_rule *)rule; - - if (frh->src_len && (r->src_len != frh->src_len)) - return 0; - - if (frh->dst_len && (r->dst_len != frh->dst_len)) - return 0; - - if (frh->src_len && (r->src != nla_get_le16(tb[FRA_SRC]))) - return 0; - - if (frh->dst_len && (r->dst != 
nla_get_le16(tb[FRA_DST]))) - return 0; - - return 1; -} - -unsigned int dnet_addr_type(__le16 addr) -{ - struct flowidn fld = { .daddr = addr }; - struct dn_fib_res res; - unsigned int ret = RTN_UNICAST; - struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0); - - res.r = NULL; - - if (tb) { - if (!tb->lookup(tb, &fld, &res)) { - ret = res.type; - dn_fib_res_put(&res); - } - } - return ret; -} - -static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb, - struct fib_rule_hdr *frh) -{ - struct dn_fib_rule *r = (struct dn_fib_rule *)rule; - - frh->dst_len = r->dst_len; - frh->src_len = r->src_len; - frh->tos = 0; - - if ((r->dst_len && - nla_put_le16(skb, FRA_DST, r->dst)) || - (r->src_len && - nla_put_le16(skb, FRA_SRC, r->src))) - goto nla_put_failure; - return 0; - -nla_put_failure: - return -ENOBUFS; -} - -static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) -{ - dn_rt_cache_flush(-1); -} - -static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = { - .family = AF_DECnet, - .rule_size = sizeof(struct dn_fib_rule), - .addr_size = sizeof(u16), - .action = dn_fib_rule_action, - .match = dn_fib_rule_match, - .configure = dn_fib_rule_configure, - .compare = dn_fib_rule_compare, - .fill = dn_fib_rule_fill, - .flush_cache = dn_fib_rule_flush_cache, - .nlgroup = RTNLGRP_DECnet_RULE, - .owner = THIS_MODULE, - .fro_net = &init_net, -}; - -void __init dn_fib_rules_init(void) -{ - dn_fib_rules_ops = - fib_rules_register(&dn_fib_rules_ops_template, &init_net); - BUG_ON(IS_ERR(dn_fib_rules_ops)); - BUG_ON(fib_default_rule_add(dn_fib_rules_ops, 0x7fff, - RT_TABLE_MAIN, 0)); -} - -void __exit dn_fib_rules_cleanup(void) -{ - rtnl_lock(); - fib_rules_unregister(dn_fib_rules_ops); - rtnl_unlock(); - rcu_barrier(); -} diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c deleted file mode 100644 index 4086f9c746af..000000000000 --- a/net/decnet/dn_table.c +++ /dev/null @@ -1,929 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. 
- * - * DECnet Routing Forwarding Information Base (Routing Tables) - * - * Author: Steve Whitehouse <SteveW@ACM.org> - * Mostly copied from the IPv4 routing code - * - * - * Changes: - * - */ -#include <linux/string.h> -#include <linux/net.h> -#include <linux/socket.h> -#include <linux/slab.h> -#include <linux/sockios.h> -#include <linux/init.h> -#include <linux/skbuff.h> -#include <linux/rtnetlink.h> -#include <linux/proc_fs.h> -#include <linux/netdevice.h> -#include <linux/timer.h> -#include <linux/spinlock.h> -#include <linux/atomic.h> -#include <linux/uaccess.h> -#include <linux/route.h> /* RTF_xxx */ -#include <net/neighbour.h> -#include <net/netlink.h> -#include <net/tcp.h> -#include <net/dst.h> -#include <net/flow.h> -#include <net/fib_rules.h> -#include <net/dn.h> -#include <net/dn_route.h> -#include <net/dn_fib.h> -#include <net/dn_neigh.h> -#include <net/dn_dev.h> - -struct dn_zone -{ - struct dn_zone *dz_next; - struct dn_fib_node **dz_hash; - int dz_nent; - int dz_divisor; - u32 dz_hashmask; -#define DZ_HASHMASK(dz) ((dz)->dz_hashmask) - int dz_order; - __le16 dz_mask; -#define DZ_MASK(dz) ((dz)->dz_mask) -}; - -struct dn_hash -{ - struct dn_zone *dh_zones[17]; - struct dn_zone *dh_zone_list; -}; - -#define dz_key_0(key) ((key).datum = 0) - -#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\ - for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) - -#define endfor_nexthops(fi) } - -#define DN_MAX_DIVISOR 1024 -#define DN_S_ZOMBIE 1 -#define DN_S_ACCESSED 2 - -#define DN_FIB_SCAN(f, fp) \ -for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next) - -#define DN_FIB_SCAN_KEY(f, fp, key) \ -for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next) - -#define RT_TABLE_MIN 1 -#define DN_FIB_TABLE_HASHSZ 256 -static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ]; -static DEFINE_RWLOCK(dn_fib_tables_lock); - -static struct kmem_cache *dn_hash_kmem __read_mostly; -static int dn_fib_hash_zombies; - -static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz) -{ - u16 h = le16_to_cpu(key.datum)>>(16 - dz->dz_order); - h ^= (h >> 10); - h ^= (h >> 6); - h &= DZ_HASHMASK(dz); - return *(dn_fib_idx_t *)&h; -} - -static inline dn_fib_key_t dz_key(__le16 dst, struct dn_zone *dz) -{ - dn_fib_key_t k; - k.datum = dst & DZ_MASK(dz); - return k; -} - -static inline struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz) -{ - return &dz->dz_hash[dn_hash(key, dz).datum]; -} - -static inline struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz) -{ - return dz->dz_hash[dn_hash(key, dz).datum]; -} - -static inline int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b) -{ - return a.datum == b.datum; -} - -static inline int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b) -{ - return a.datum <= b.datum; -} - -static inline void dn_rebuild_zone(struct dn_zone *dz, - struct dn_fib_node **old_ht, - int old_divisor) -{ - struct dn_fib_node *f, **fp, *next; - int i; - - for(i = 0; i < old_divisor; i++) { - for(f = old_ht[i]; f; f = next) { - next = f->fn_next; - for(fp = dn_chain_p(f->fn_key, dz); - *fp && dn_key_leq((*fp)->fn_key, f->fn_key); - fp = &(*fp)->fn_next) - /* NOTHING */; - f->fn_next = *fp; - *fp = f; - } - } -} - -static void dn_rehash_zone(struct dn_zone *dz) -{ - struct dn_fib_node **ht, **old_ht; - int old_divisor, new_divisor; - u32 new_hashmask; - - old_divisor = dz->dz_divisor; - - switch (old_divisor) { - case 16: - new_divisor = 256; - new_hashmask = 0xFF; - break; - default: - printk(KERN_DEBUG 
"DECnet: dn_rehash_zone: BUG! %d\n", - old_divisor); - fallthrough; - case 256: - new_divisor = 1024; - new_hashmask = 0x3FF; - break; - } - - ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL); - if (ht == NULL) - return; - - write_lock_bh(&dn_fib_tables_lock); - old_ht = dz->dz_hash; - dz->dz_hash = ht; - dz->dz_hashmask = new_hashmask; - dz->dz_divisor = new_divisor; - dn_rebuild_zone(dz, old_ht, old_divisor); - write_unlock_bh(&dn_fib_tables_lock); - kfree(old_ht); -} - -static void dn_free_node(struct dn_fib_node *f) -{ - dn_fib_release_info(DN_FIB_INFO(f)); - kmem_cache_free(dn_hash_kmem, f); -} - - -static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) -{ - int i; - struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL); - if (!dz) - return NULL; - - if (z) { - dz->dz_divisor = 16; - dz->dz_hashmask = 0x0F; - } else { - dz->dz_divisor = 1; - dz->dz_hashmask = 0; - } - - dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL); - if (!dz->dz_hash) { - kfree(dz); - return NULL; - } - - dz->dz_order = z; - dz->dz_mask = dnet_make_mask(z); - - for(i = z + 1; i <= 16; i++) - if (table->dh_zones[i]) - break; - - write_lock_bh(&dn_fib_tables_lock); - if (i>16) { - dz->dz_next = table->dh_zone_list; - table->dh_zone_list = dz; - } else { - dz->dz_next = table->dh_zones[i]->dz_next; - table->dh_zones[i]->dz_next = dz; - } - table->dh_zones[z] = dz; - write_unlock_bh(&dn_fib_tables_lock); - return dz; -} - - -static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct nlattr *attrs[], struct dn_fib_info *fi) -{ - struct rtnexthop *nhp; - int nhlen; - - if (attrs[RTA_PRIORITY] && - nla_get_u32(attrs[RTA_PRIORITY]) != fi->fib_priority) - return 1; - - if (attrs[RTA_OIF] || attrs[RTA_GATEWAY]) { - if ((!attrs[RTA_OIF] || nla_get_u32(attrs[RTA_OIF]) == fi->fib_nh->nh_oif) && - (!attrs[RTA_GATEWAY] || nla_get_le16(attrs[RTA_GATEWAY]) != fi->fib_nh->nh_gw)) - return 0; - return 1; - } - - if (!attrs[RTA_MULTIPATH]) - return 0; - - nhp = nla_data(attrs[RTA_MULTIPATH]); - nhlen = nla_len(attrs[RTA_MULTIPATH]); - - for_nexthops(fi) { - int attrlen = nhlen - sizeof(struct rtnexthop); - __le16 gw; - - if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0) - return -EINVAL; - if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif) - return 1; - if (attrlen) { - struct nlattr *gw_attr; - - gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY); - gw = gw_attr ? 
nla_get_le16(gw_attr) : 0; - - if (gw && gw != nh->nh_gw) - return 1; - } - nhp = RTNH_NEXT(nhp); - } endfor_nexthops(fi); - - return 0; -} - -static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi) -{ - size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg)) - + nla_total_size(4) /* RTA_TABLE */ - + nla_total_size(2) /* RTA_DST */ - + nla_total_size(4) /* RTA_PRIORITY */ - + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */ - - /* space for nested metrics */ - payload += nla_total_size((RTAX_MAX * nla_total_size(4))); - - if (fi->fib_nhs) { - /* Also handles the special case fib_nhs == 1 */ - - /* each nexthop is packed in an attribute */ - size_t nhsize = nla_total_size(sizeof(struct rtnexthop)); - - /* may contain a gateway attribute */ - nhsize += nla_total_size(4); - - /* all nexthops are packed in a nested attribute */ - payload += nla_total_size(fi->fib_nhs * nhsize); - } - - return payload; -} - -static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, - u32 tb_id, u8 type, u8 scope, void *dst, int dst_len, - struct dn_fib_info *fi, unsigned int flags) -{ - struct rtmsg *rtm; - struct nlmsghdr *nlh; - - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags); - if (!nlh) - return -EMSGSIZE; - - rtm = nlmsg_data(nlh); - rtm->rtm_family = AF_DECnet; - rtm->rtm_dst_len = dst_len; - rtm->rtm_src_len = 0; - rtm->rtm_tos = 0; - rtm->rtm_table = tb_id; - rtm->rtm_flags = fi->fib_flags; - rtm->rtm_scope = scope; - rtm->rtm_type = type; - rtm->rtm_protocol = fi->fib_protocol; - - if (nla_put_u32(skb, RTA_TABLE, tb_id) < 0) - goto errout; - - if (rtm->rtm_dst_len && - nla_put(skb, RTA_DST, 2, dst) < 0) - goto errout; - - if (fi->fib_priority && - nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority) < 0) - goto errout; - - if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) - goto errout; - - if (fi->fib_nhs == 1) { - if (fi->fib_nh->nh_gw && - nla_put_le16(skb, RTA_GATEWAY, fi->fib_nh->nh_gw) < 0) - goto errout; - - if (fi->fib_nh->nh_oif && - nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif) < 0) - goto errout; - } - - if (fi->fib_nhs > 1) { - struct rtnexthop *nhp; - struct nlattr *mp_head; - - mp_head = nla_nest_start_noflag(skb, RTA_MULTIPATH); - if (!mp_head) - goto errout; - - for_nexthops(fi) { - if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) - goto errout; - - nhp->rtnh_flags = nh->nh_flags & 0xFF; - nhp->rtnh_hops = nh->nh_weight - 1; - nhp->rtnh_ifindex = nh->nh_oif; - - if (nh->nh_gw && - nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0) - goto errout; - - nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp; - } endfor_nexthops(fi); - - nla_nest_end(skb, mp_head); - } - - nlmsg_end(skb, nlh); - return 0; - -errout: - nlmsg_cancel(skb, nlh); - return -EMSGSIZE; -} - - -static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id, - struct nlmsghdr *nlh, struct netlink_skb_parms *req) -{ - struct sk_buff *skb; - u32 portid = req ? 
req->portid : 0; - int err = -ENOBUFS; - - skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL); - if (skb == NULL) - goto errout; - - err = dn_fib_dump_info(skb, portid, nlh->nlmsg_seq, event, tb_id, - f->fn_type, f->fn_scope, &f->fn_key, z, - DN_FIB_INFO(f), 0); - if (err < 0) { - /* -EMSGSIZE implies BUG in dn_fib_nlmsg_size() */ - WARN_ON(err == -EMSGSIZE); - kfree_skb(skb); - goto errout; - } - rtnl_notify(skb, &init_net, portid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL); - return; -errout: - if (err < 0) - rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_ROUTE, err); -} - -static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb, - struct netlink_callback *cb, - struct dn_fib_table *tb, - struct dn_zone *dz, - struct dn_fib_node *f) -{ - int i, s_i; - - s_i = cb->args[4]; - for(i = 0; f; i++, f = f->fn_next) { - if (i < s_i) - continue; - if (f->fn_state & DN_S_ZOMBIE) - continue; - if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_NEWROUTE, - tb->n, - (f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type, - f->fn_scope, &f->fn_key, dz->dz_order, - f->fn_info, NLM_F_MULTI) < 0) { - cb->args[4] = i; - return -1; - } - } - cb->args[4] = i; - return skb->len; -} - -static __inline__ int dn_hash_dump_zone(struct sk_buff *skb, - struct netlink_callback *cb, - struct dn_fib_table *tb, - struct dn_zone *dz) -{ - int h, s_h; - - s_h = cb->args[3]; - for(h = 0; h < dz->dz_divisor; h++) { - if (h < s_h) - continue; - if (h > s_h) - memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0])); - if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL) - continue; - if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) { - cb->args[3] = h; - return -1; - } - } - cb->args[3] = h; - return skb->len; -} - -static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb, - struct netlink_callback *cb) -{ - int m, s_m; - struct dn_zone *dz; - struct dn_hash *table = (struct dn_hash *)tb->data; - - s_m = cb->args[2]; - read_lock(&dn_fib_tables_lock); - for(dz = table->dh_zone_list, m = 0; dz; dz = dz->dz_next, m++) { - if (m < s_m) - continue; - if (m > s_m) - memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0])); - - if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) { - cb->args[2] = m; - read_unlock(&dn_fib_tables_lock); - return -1; - } - } - read_unlock(&dn_fib_tables_lock); - cb->args[2] = m; - - return skb->len; -} - -int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct net *net = sock_net(skb->sk); - unsigned int h, s_h; - unsigned int e = 0, s_e; - struct dn_fib_table *tb; - int dumped = 0; - - if (!net_eq(net, &init_net)) - return 0; - - if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && - ((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED) - return dn_cache_dump(skb, cb); - - s_h = cb->args[0]; - s_e = cb->args[1]; - - for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) { - e = 0; - hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) { - if (e < s_e) - goto next; - if (dumped) - memset(&cb->args[2], 0, sizeof(cb->args) - - 2 * sizeof(cb->args[0])); - if (tb->dump(tb, skb, cb) < 0) - goto out; - dumped = 1; -next: - e++; - } - } -out: - cb->args[1] = e; - cb->args[0] = h; - - return skb->len; -} - -static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[], - struct nlmsghdr *n, struct netlink_skb_parms *req) -{ - struct dn_hash *table = (struct dn_hash *)tb->data; - struct dn_fib_node *new_f, *f, **fp, **del_fp; - struct dn_zone *dz; - struct dn_fib_info *fi; - int z 
= r->rtm_dst_len; - int type = r->rtm_type; - dn_fib_key_t key; - int err; - - if (z > 16) - return -EINVAL; - - dz = table->dh_zones[z]; - if (!dz && !(dz = dn_new_zone(table, z))) - return -ENOBUFS; - - dz_key_0(key); - if (attrs[RTA_DST]) { - __le16 dst = nla_get_le16(attrs[RTA_DST]); - if (dst & ~DZ_MASK(dz)) - return -EINVAL; - key = dz_key(dst, dz); - } - - if ((fi = dn_fib_create_info(r, attrs, n, &err)) == NULL) - return err; - - if (dz->dz_nent > (dz->dz_divisor << 2) && - dz->dz_divisor > DN_MAX_DIVISOR && - (z==16 || (1<<z) > dz->dz_divisor)) - dn_rehash_zone(dz); - - fp = dn_chain_p(key, dz); - - DN_FIB_SCAN(f, fp) { - if (dn_key_leq(key, f->fn_key)) - break; - } - - del_fp = NULL; - - if (f && (f->fn_state & DN_S_ZOMBIE) && - dn_key_eq(f->fn_key, key)) { - del_fp = fp; - fp = &f->fn_next; - f = *fp; - goto create; - } - - DN_FIB_SCAN_KEY(f, fp, key) { - if (fi->fib_priority <= DN_FIB_INFO(f)->fib_priority) - break; - } - - if (f && dn_key_eq(f->fn_key, key) && - fi->fib_priority == DN_FIB_INFO(f)->fib_priority) { - struct dn_fib_node **ins_fp; - - err = -EEXIST; - if (n->nlmsg_flags & NLM_F_EXCL) - goto out; - - if (n->nlmsg_flags & NLM_F_REPLACE) { - del_fp = fp; - fp = &f->fn_next; - f = *fp; - goto replace; - } - - ins_fp = fp; - err = -EEXIST; - - DN_FIB_SCAN_KEY(f, fp, key) { - if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority) - break; - if (f->fn_type == type && - f->fn_scope == r->rtm_scope && - DN_FIB_INFO(f) == fi) - goto out; - } - - if (!(n->nlmsg_flags & NLM_F_APPEND)) { - fp = ins_fp; - f = *fp; - } - } - -create: - err = -ENOENT; - if (!(n->nlmsg_flags & NLM_F_CREATE)) - goto out; - -replace: - err = -ENOBUFS; - new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL); - if (new_f == NULL) - goto out; - - new_f->fn_key = key; - new_f->fn_type = type; - new_f->fn_scope = r->rtm_scope; - DN_FIB_INFO(new_f) = fi; - - new_f->fn_next = f; - write_lock_bh(&dn_fib_tables_lock); - *fp = new_f; - write_unlock_bh(&dn_fib_tables_lock); - dz->dz_nent++; - - if (del_fp) { - f = *del_fp; - write_lock_bh(&dn_fib_tables_lock); - *del_fp = f->fn_next; - write_unlock_bh(&dn_fib_tables_lock); - - if (!(f->fn_state & DN_S_ZOMBIE)) - dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req); - if (f->fn_state & DN_S_ACCESSED) - dn_rt_cache_flush(-1); - dn_free_node(f); - dz->dz_nent--; - } else { - dn_rt_cache_flush(-1); - } - - dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req); - - return 0; -out: - dn_fib_release_info(fi); - return err; -} - - -static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[], - struct nlmsghdr *n, struct netlink_skb_parms *req) -{ - struct dn_hash *table = (struct dn_hash*)tb->data; - struct dn_fib_node **fp, **del_fp, *f; - int z = r->rtm_dst_len; - struct dn_zone *dz; - dn_fib_key_t key; - int matched; - - - if (z > 16) - return -EINVAL; - - if ((dz = table->dh_zones[z]) == NULL) - return -ESRCH; - - dz_key_0(key); - if (attrs[RTA_DST]) { - __le16 dst = nla_get_le16(attrs[RTA_DST]); - if (dst & ~DZ_MASK(dz)) - return -EINVAL; - key = dz_key(dst, dz); - } - - fp = dn_chain_p(key, dz); - - DN_FIB_SCAN(f, fp) { - if (dn_key_eq(f->fn_key, key)) - break; - if (dn_key_leq(key, f->fn_key)) - return -ESRCH; - } - - matched = 0; - del_fp = NULL; - DN_FIB_SCAN_KEY(f, fp, key) { - struct dn_fib_info *fi = DN_FIB_INFO(f); - - if (f->fn_state & DN_S_ZOMBIE) - return -ESRCH; - - matched++; - - if (del_fp == NULL && - (!r->rtm_type || f->fn_type == r->rtm_type) && - (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) && - 
(!r->rtm_protocol || - fi->fib_protocol == r->rtm_protocol) && - dn_fib_nh_match(r, n, attrs, fi) == 0) - del_fp = fp; - } - - if (del_fp) { - f = *del_fp; - dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req); - - if (matched != 1) { - write_lock_bh(&dn_fib_tables_lock); - *del_fp = f->fn_next; - write_unlock_bh(&dn_fib_tables_lock); - - if (f->fn_state & DN_S_ACCESSED) - dn_rt_cache_flush(-1); - dn_free_node(f); - dz->dz_nent--; - } else { - f->fn_state |= DN_S_ZOMBIE; - if (f->fn_state & DN_S_ACCESSED) { - f->fn_state &= ~DN_S_ACCESSED; - dn_rt_cache_flush(-1); - } - if (++dn_fib_hash_zombies > 128) - dn_fib_flush(); - } - - return 0; - } - - return -ESRCH; -} - -static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table) -{ - int found = 0; - struct dn_fib_node *f; - - while((f = *fp) != NULL) { - struct dn_fib_info *fi = DN_FIB_INFO(f); - - if (fi && ((f->fn_state & DN_S_ZOMBIE) || (fi->fib_flags & RTNH_F_DEAD))) { - write_lock_bh(&dn_fib_tables_lock); - *fp = f->fn_next; - write_unlock_bh(&dn_fib_tables_lock); - - dn_free_node(f); - found++; - continue; - } - fp = &f->fn_next; - } - - return found; -} - -static int dn_fib_table_flush(struct dn_fib_table *tb) -{ - struct dn_hash *table = (struct dn_hash *)tb->data; - struct dn_zone *dz; - int found = 0; - - dn_fib_hash_zombies = 0; - for(dz = table->dh_zone_list; dz; dz = dz->dz_next) { - int i; - int tmp = 0; - for(i = dz->dz_divisor-1; i >= 0; i--) - tmp += dn_flush_list(&dz->dz_hash[i], dz->dz_order, table); - dz->dz_nent -= tmp; - found += tmp; - } - - return found; -} - -static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowidn *flp, struct dn_fib_res *res) -{ - int err; - struct dn_zone *dz; - struct dn_hash *t = (struct dn_hash *)tb->data; - - read_lock(&dn_fib_tables_lock); - for(dz = t->dh_zone_list; dz; dz = dz->dz_next) { - struct dn_fib_node *f; - dn_fib_key_t k = dz_key(flp->daddr, dz); - - for(f = dz_chain(k, dz); f; f = f->fn_next) { - if (!dn_key_eq(k, f->fn_key)) { - if (dn_key_leq(k, f->fn_key)) - break; - else - continue; - } - - f->fn_state |= DN_S_ACCESSED; - - if (f->fn_state&DN_S_ZOMBIE) - continue; - - if (f->fn_scope < flp->flowidn_scope) - continue; - - err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), flp, res); - - if (err == 0) { - res->type = f->fn_type; - res->scope = f->fn_scope; - res->prefixlen = dz->dz_order; - goto out; - } - if (err < 0) - goto out; - } - } - err = 1; -out: - read_unlock(&dn_fib_tables_lock); - return err; -} - - -struct dn_fib_table *dn_fib_get_table(u32 n, int create) -{ - struct dn_fib_table *t; - unsigned int h; - - if (n < RT_TABLE_MIN) - return NULL; - - if (n > RT_TABLE_MAX) - return NULL; - - h = n & (DN_FIB_TABLE_HASHSZ - 1); - rcu_read_lock(); - hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) { - if (t->n == n) { - rcu_read_unlock(); - return t; - } - } - rcu_read_unlock(); - - if (!create) - return NULL; - - if (in_interrupt()) { - net_dbg_ratelimited("DECnet: BUG! 
Attempt to create routing table from interrupt\n"); - return NULL; - } - - t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash), - GFP_KERNEL); - if (t == NULL) - return NULL; - - t->n = n; - t->insert = dn_fib_table_insert; - t->delete = dn_fib_table_delete; - t->lookup = dn_fib_table_lookup; - t->flush = dn_fib_table_flush; - t->dump = dn_fib_table_dump; - hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]); - - return t; -} - -struct dn_fib_table *dn_fib_empty_table(void) -{ - u32 id; - - for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++) - if (dn_fib_get_table(id, 0) == NULL) - return dn_fib_get_table(id, 1); - return NULL; -} - -void dn_fib_flush(void) -{ - int flushed = 0; - struct dn_fib_table *tb; - unsigned int h; - - for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { - hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) - flushed += tb->flush(tb); - } - - if (flushed) - dn_rt_cache_flush(-1); -} - -void __init dn_fib_table_init(void) -{ - dn_hash_kmem = kmem_cache_create("dn_fib_info_cache", - sizeof(struct dn_fib_info), - 0, SLAB_HWCACHE_ALIGN, - NULL); -} - -void __exit dn_fib_table_cleanup(void) -{ - struct dn_fib_table *t; - struct hlist_node *next; - unsigned int h; - - write_lock(&dn_fib_tables_lock); - for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { - hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h], - hlist) { - hlist_del(&t->hlist); - kfree(t); - } - } - write_unlock(&dn_fib_tables_lock); -} diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c deleted file mode 100644 index aa4155875ca8..000000000000 --- a/net/decnet/dn_timer.c +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * DECnet Socket Timer Functions - * - * Author: Steve Whitehouse <SteveW@ACM.org> - * - * - * Changes: - * Steve Whitehouse : Made keepalive timer part of the same - * timer idea. - * Steve Whitehouse : Added checks for sk->sock_readers - * David S. Miller : New socket locking - * Steve Whitehouse : Timer grabs socket ref. - */ -#include <linux/net.h> -#include <linux/socket.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/timer.h> -#include <linux/spinlock.h> -#include <net/sock.h> -#include <linux/atomic.h> -#include <linux/jiffies.h> -#include <net/flow.h> -#include <net/dn.h> - -/* - * Slow timer is for everything else (n * 500mS) - */ - -#define SLOW_INTERVAL (HZ/2) - -static void dn_slow_timer(struct timer_list *t); - -void dn_start_slow_timer(struct sock *sk) -{ - timer_setup(&sk->sk_timer, dn_slow_timer, 0); - sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL); -} - -void dn_stop_slow_timer(struct sock *sk) -{ - sk_stop_timer(sk, &sk->sk_timer); -} - -static void dn_slow_timer(struct timer_list *t) -{ - struct sock *sk = from_timer(sk, t, sk_timer); - struct dn_scp *scp = DN_SK(sk); - - bh_lock_sock(sk); - - if (sock_owned_by_user(sk)) { - sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10); - goto out; - } - - /* - * The persist timer is the standard slow timer used for retransmits - * in both connection establishment and disconnection as well as - * in the RUN state. The different states are catered for by changing - * the function pointer in the socket. Setting the timer to a value - * of zero turns it off. 
We allow the persist_fxn to turn the - * timer off in a permant way by returning non-zero, so that - * timer based routines may remove sockets. This is why we have a - * sock_hold()/sock_put() around the timer to prevent the socket - * going away in the middle. - */ - if (scp->persist && scp->persist_fxn) { - if (scp->persist <= SLOW_INTERVAL) { - scp->persist = 0; - - if (scp->persist_fxn(sk)) - goto out; - } else { - scp->persist -= SLOW_INTERVAL; - } - } - - /* - * Check for keepalive timeout. After the other timer 'cos if - * the previous timer caused a retransmit, we don't need to - * do this. scp->stamp is the last time that we sent a packet. - * The keepalive function sends a link service packet to the - * other end. If it remains unacknowledged, the standard - * socket timers will eventually shut the socket down. Each - * time we do this, scp->stamp will be updated, thus - * we won't try and send another until scp->keepalive has passed - * since the last successful transmission. - */ - if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) { - if (time_after_eq(jiffies, scp->stamp + scp->keepalive)) - scp->keepalive_fxn(sk); - } - - sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL); -out: - bh_unlock_sock(sk); - sock_put(sk); -} diff --git a/net/decnet/netfilter/Kconfig b/net/decnet/netfilter/Kconfig deleted file mode 100644 index 14ec4ef95fab..000000000000 --- a/net/decnet/netfilter/Kconfig +++ /dev/null @@ -1,17 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# DECnet netfilter configuration -# - -menu "DECnet: Netfilter Configuration" - depends on DECNET && NETFILTER - depends on NETFILTER_ADVANCED - -config DECNET_NF_GRABULATOR - tristate "Routing message grabulator (for userland routing daemon)" - help - Enable this module if you want to use the userland DECnet routing - daemon. You will also need to enable routing support for DECnet - unless you just want to monitor routing messages from other nodes. - -endmenu diff --git a/net/decnet/netfilter/Makefile b/net/decnet/netfilter/Makefile deleted file mode 100644 index 429c84289d0f..000000000000 --- a/net/decnet/netfilter/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for DECnet netfilter modules -# - -obj-$(CONFIG_DECNET_NF_GRABULATOR) += dn_rtmsg.o diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c deleted file mode 100644 index 26a9193df783..000000000000 --- a/net/decnet/netfilter/dn_rtmsg.c +++ /dev/null @@ -1,158 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. 
- * - * DECnet Routing Message Grabulator - * - * (C) 2000 ChyGwyn Limited - https://www.chygwyn.com/ - * - * Author: Steven Whitehouse <steve@chygwyn.com> - */ -#include <linux/module.h> -#include <linux/skbuff.h> -#include <linux/slab.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/netfilter.h> -#include <linux/spinlock.h> -#include <net/netlink.h> -#include <linux/netfilter_decnet.h> - -#include <net/sock.h> -#include <net/flow.h> -#include <net/dn.h> -#include <net/dn_route.h> - -static struct sock *dnrmg = NULL; - - -static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp) -{ - struct sk_buff *skb = NULL; - size_t size; - sk_buff_data_t old_tail; - struct nlmsghdr *nlh; - unsigned char *ptr; - struct nf_dn_rtmsg *rtm; - - size = NLMSG_ALIGN(rt_skb->len) + - NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)); - skb = nlmsg_new(size, GFP_ATOMIC); - if (!skb) { - *errp = -ENOMEM; - return NULL; - } - old_tail = skb->tail; - nlh = nlmsg_put(skb, 0, 0, 0, size, 0); - if (!nlh) { - kfree_skb(skb); - *errp = -ENOMEM; - return NULL; - } - rtm = (struct nf_dn_rtmsg *)nlmsg_data(nlh); - rtm->nfdn_ifindex = rt_skb->dev->ifindex; - ptr = NFDN_RTMSG(rtm); - skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len); - nlh->nlmsg_len = skb->tail - old_tail; - return skb; -} - -static void dnrmg_send_peer(struct sk_buff *skb) -{ - struct sk_buff *skb2; - int status = 0; - int group = 0; - unsigned char flags = *skb->data; - - switch (flags & DN_RT_CNTL_MSK) { - case DN_RT_PKT_L1RT: - group = DNRNG_NLGRP_L1; - break; - case DN_RT_PKT_L2RT: - group = DNRNG_NLGRP_L2; - break; - default: - return; - } - - skb2 = dnrmg_build_message(skb, &status); - if (skb2 == NULL) - return; - NETLINK_CB(skb2).dst_group = group; - netlink_broadcast(dnrmg, skb2, 0, group, GFP_ATOMIC); -} - - -static unsigned int dnrmg_hook(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - dnrmg_send_peer(skb); - return NF_ACCEPT; -} - - -#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err), NULL); return; } while (0) - -static inline void dnrmg_receive_user_skb(struct sk_buff *skb) -{ - struct nlmsghdr *nlh = nlmsg_hdr(skb); - - if (skb->len < sizeof(*nlh) || - nlh->nlmsg_len < sizeof(*nlh) || - skb->len < nlh->nlmsg_len) - return; - - if (!netlink_capable(skb, CAP_NET_ADMIN)) - RCV_SKB_FAIL(-EPERM); - - /* Eventually we might send routing messages too */ - - RCV_SKB_FAIL(-EINVAL); -} - -static const struct nf_hook_ops dnrmg_ops = { - .hook = dnrmg_hook, - .pf = NFPROTO_DECNET, - .hooknum = NF_DN_ROUTE, - .priority = NF_DN_PRI_DNRTMSG, -}; - -static int __init dn_rtmsg_init(void) -{ - int rv = 0; - struct netlink_kernel_cfg cfg = { - .groups = DNRNG_NLGRP_MAX, - .input = dnrmg_receive_user_skb, - }; - - dnrmg = netlink_kernel_create(&init_net, NETLINK_DNRTMSG, &cfg); - if (dnrmg == NULL) { - printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket"); - return -ENOMEM; - } - - rv = nf_register_net_hook(&init_net, &dnrmg_ops); - if (rv) { - netlink_kernel_release(dnrmg); - } - - return rv; -} - -static void __exit dn_rtmsg_fini(void) -{ - nf_unregister_net_hook(&init_net, &dnrmg_ops); - netlink_kernel_release(dnrmg); -} - - -MODULE_DESCRIPTION("DECnet Routing Message Grabulator"); -MODULE_AUTHOR("Steven Whitehouse <steve@chygwyn.com>"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_DNRTMSG); - -module_init(dn_rtmsg_init); -module_exit(dn_rtmsg_fini); diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c deleted file mode 100644 
index 67b5ab2657b7..000000000000 --- a/net/decnet/sysctl_net_decnet.c +++ /dev/null @@ -1,362 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DECnet An implementation of the DECnet protocol suite for the LINUX - * operating system. DECnet is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * DECnet sysctl support functions - * - * Author: Steve Whitehouse <SteveW@ACM.org> - * - * - * Changes: - * Steve Whitehouse - C99 changes and default device handling - * Steve Whitehouse - Memory buffer settings, like the tcp ones - * - */ -#include <linux/mm.h> -#include <linux/sysctl.h> -#include <linux/fs.h> -#include <linux/netdevice.h> -#include <linux/string.h> -#include <net/neighbour.h> -#include <net/dst.h> -#include <net/flow.h> - -#include <linux/uaccess.h> - -#include <net/dn.h> -#include <net/dn_dev.h> -#include <net/dn_route.h> - - -int decnet_debug_level; -int decnet_time_wait = 30; -int decnet_dn_count = 1; -int decnet_di_count = 3; -int decnet_dr_count = 3; -int decnet_log_martians = 1; -int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; - -/* Reasonable defaults, I hope, based on tcp's defaults */ -long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; -int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; -int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; - -#ifdef CONFIG_SYSCTL -extern int decnet_dst_gc_interval; -static int min_decnet_time_wait[] = { 5 }; -static int max_decnet_time_wait[] = { 600 }; -static int min_state_count[] = { 1 }; -static int max_state_count[] = { NSP_MAXRXTSHIFT }; -static int min_decnet_dst_gc_interval[] = { 1 }; -static int max_decnet_dst_gc_interval[] = { 60 }; -static int min_decnet_no_fc_max_cwnd[] = { NSP_MIN_WINDOW }; -static int max_decnet_no_fc_max_cwnd[] = { NSP_MAX_WINDOW }; -static char node_name[7] = "???"; - -static struct ctl_table_header *dn_table_header = NULL; - -/* - * ctype.h :-) - */ -#define ISNUM(x) (((x) >= '0') && ((x) <= '9')) -#define ISLOWER(x) (((x) >= 'a') && ((x) <= 'z')) -#define ISUPPER(x) (((x) >= 'A') && ((x) <= 'Z')) -#define ISALPHA(x) (ISLOWER(x) || ISUPPER(x)) -#define INVALID_END_CHAR(x) (ISNUM(x) || ISALPHA(x)) - -static void strip_it(char *str) -{ - for(;;) { - switch (*str) { - case ' ': - case '\n': - case '\r': - case ':': - *str = 0; - fallthrough; - case 0: - return; - } - str++; - } -} - -/* - * Simple routine to parse an ascii DECnet address - * into a network order address. - */ -static int parse_addr(__le16 *addr, char *str) -{ - __u16 area, node; - - while(*str && !ISNUM(*str)) str++; - - if (*str == 0) - return -1; - - area = (*str++ - '0'); - if (ISNUM(*str)) { - area *= 10; - area += (*str++ - '0'); - } - - if (*str++ != '.') - return -1; - - if (!ISNUM(*str)) - return -1; - - node = *str++ - '0'; - if (ISNUM(*str)) { - node *= 10; - node += (*str++ - '0'); - } - if (ISNUM(*str)) { - node *= 10; - node += (*str++ - '0'); - } - if (ISNUM(*str)) { - node *= 10; - node += (*str++ - '0'); - } - - if ((node > 1023) || (area > 63)) - return -1; - - if (INVALID_END_CHAR(*str)) - return -1; - - *addr = cpu_to_le16((area << 10) | node); - - return 0; -} - -static int dn_node_address_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - char addr[DN_ASCBUF_LEN]; - size_t len; - __le16 dnaddr; - - if (!*lenp || (*ppos && !write)) { - *lenp = 0; - return 0; - } - - if (write) { - len = (*lenp < DN_ASCBUF_LEN) ? 
*lenp : (DN_ASCBUF_LEN-1); - memcpy(addr, buffer, len); - addr[len] = 0; - strip_it(addr); - - if (parse_addr(&dnaddr, addr)) - return -EINVAL; - - dn_dev_devices_off(); - - decnet_address = dnaddr; - - dn_dev_devices_on(); - - *ppos += len; - - return 0; - } - - dn_addr2asc(le16_to_cpu(decnet_address), addr); - len = strlen(addr); - addr[len++] = '\n'; - - if (len > *lenp) - len = *lenp; - memcpy(buffer, addr, len); - *lenp = len; - *ppos += len; - - return 0; -} - -static int dn_def_dev_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - size_t len; - struct net_device *dev; - char devname[17]; - - if (!*lenp || (*ppos && !write)) { - *lenp = 0; - return 0; - } - - if (write) { - if (*lenp > 16) - return -E2BIG; - - memcpy(devname, buffer, *lenp); - devname[*lenp] = 0; - strip_it(devname); - - dev = dev_get_by_name(&init_net, devname); - if (dev == NULL) - return -ENODEV; - - if (dev->dn_ptr == NULL) { - dev_put(dev); - return -ENODEV; - } - - if (dn_dev_set_default(dev, 1)) { - dev_put(dev); - return -ENODEV; - } - *ppos += *lenp; - - return 0; - } - - dev = dn_dev_get_default(); - if (dev == NULL) { - *lenp = 0; - return 0; - } - - strcpy(devname, dev->name); - dev_put(dev); - len = strlen(devname); - devname[len++] = '\n'; - - if (len > *lenp) len = *lenp; - - memcpy(buffer, devname, len); - *lenp = len; - *ppos += len; - - return 0; -} - -static struct ctl_table dn_table[] = { - { - .procname = "node_address", - .maxlen = 7, - .mode = 0644, - .proc_handler = dn_node_address_handler, - }, - { - .procname = "node_name", - .data = node_name, - .maxlen = 7, - .mode = 0644, - .proc_handler = proc_dostring, - }, - { - .procname = "default_device", - .maxlen = 16, - .mode = 0644, - .proc_handler = dn_def_dev_handler, - }, - { - .procname = "time_wait", - .data = &decnet_time_wait, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_decnet_time_wait, - .extra2 = &max_decnet_time_wait - }, - { - .procname = "dn_count", - .data = &decnet_dn_count, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_state_count, - .extra2 = &max_state_count - }, - { - .procname = "di_count", - .data = &decnet_di_count, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_state_count, - .extra2 = &max_state_count - }, - { - .procname = "dr_count", - .data = &decnet_dr_count, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_state_count, - .extra2 = &max_state_count - }, - { - .procname = "dst_gc_interval", - .data = &decnet_dst_gc_interval, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_decnet_dst_gc_interval, - .extra2 = &max_decnet_dst_gc_interval - }, - { - .procname = "no_fc_max_cwnd", - .data = &decnet_no_fc_max_cwnd, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_decnet_no_fc_max_cwnd, - .extra2 = &max_decnet_no_fc_max_cwnd - }, - { - .procname = "decnet_mem", - .data = &sysctl_decnet_mem, - .maxlen = sizeof(sysctl_decnet_mem), - .mode = 0644, - .proc_handler = proc_doulongvec_minmax - }, - { - .procname = "decnet_rmem", - .data = &sysctl_decnet_rmem, - .maxlen = sizeof(sysctl_decnet_rmem), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "decnet_wmem", - .data = &sysctl_decnet_wmem, - .maxlen = sizeof(sysctl_decnet_wmem), - .mode = 0644, - .proc_handler = 
proc_dointvec, - }, - { - .procname = "debug", - .data = &decnet_debug_level, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { } -}; - -void dn_register_sysctl(void) -{ - dn_table_header = register_net_sysctl(&init_net, "net/decnet", dn_table); -} - -void dn_unregister_sysctl(void) -{ - unregister_net_sysctl_table(dn_table_header); -} - -#else /* CONFIG_SYSCTL */ -void dn_unregister_sysctl(void) -{ -} -void dn_register_sysctl(void) -{ -} - -#endif diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index cac48a741f27..ed56c7a554b8 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -469,10 +469,16 @@ static int dsa_port_setup(struct dsa_port *dp) dsa_port_disable(dp); break; case DSA_PORT_TYPE_CPU: - err = dsa_port_link_register_of(dp); - if (err) - break; - dsa_port_link_registered = true; + if (dp->dn) { + err = dsa_shared_port_link_register_of(dp); + if (err) + break; + dsa_port_link_registered = true; + } else { + dev_warn(ds->dev, + "skipping link registration for CPU port %d\n", + dp->index); + } err = dsa_port_enable(dp, NULL); if (err) @@ -481,10 +487,16 @@ static int dsa_port_setup(struct dsa_port *dp) break; case DSA_PORT_TYPE_DSA: - err = dsa_port_link_register_of(dp); - if (err) - break; - dsa_port_link_registered = true; + if (dp->dn) { + err = dsa_shared_port_link_register_of(dp); + if (err) + break; + dsa_port_link_registered = true; + } else { + dev_warn(ds->dev, + "skipping link registration for DSA port %d\n", + dp->index); + } err = dsa_port_enable(dp, NULL); if (err) @@ -505,7 +517,7 @@ static int dsa_port_setup(struct dsa_port *dp) if (err && dsa_port_enabled) dsa_port_disable(dp); if (err && dsa_port_link_registered) - dsa_port_link_unregister_of(dp); + dsa_shared_port_link_unregister_of(dp); if (err) { if (ds->ops->port_teardown) ds->ops->port_teardown(ds, dp->index); @@ -577,11 +589,13 @@ static void dsa_port_teardown(struct dsa_port *dp) break; case DSA_PORT_TYPE_CPU: dsa_port_disable(dp); - dsa_port_link_unregister_of(dp); + if (dp->dn) + dsa_shared_port_link_unregister_of(dp); break; case DSA_PORT_TYPE_DSA: dsa_port_disable(dp); - dsa_port_link_unregister_of(dp); + if (dp->dn) + dsa_shared_port_link_unregister_of(dp); break; case DSA_PORT_TYPE_USER: if (dp->slave) { @@ -1046,26 +1060,24 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) static int dsa_tree_setup_master(struct dsa_switch_tree *dst) { - struct dsa_port *dp; + struct dsa_port *cpu_dp; int err = 0; rtnl_lock(); - list_for_each_entry(dp, &dst->ports, list) { - if (dsa_port_is_cpu(dp)) { - struct net_device *master = dp->master; - bool admin_up = (master->flags & IFF_UP) && - !qdisc_tx_is_noop(master); + dsa_tree_for_each_cpu_port(cpu_dp, dst) { + struct net_device *master = cpu_dp->master; + bool admin_up = (master->flags & IFF_UP) && + !qdisc_tx_is_noop(master); - err = dsa_master_setup(master, dp); - if (err) - break; + err = dsa_master_setup(master, cpu_dp); + if (err) + break; - /* Replay master state event */ - dsa_tree_master_admin_state_change(dst, master, admin_up); - dsa_tree_master_oper_state_change(dst, master, - netif_oper_up(master)); - } + /* Replay master state event */ + dsa_tree_master_admin_state_change(dst, master, admin_up); + dsa_tree_master_oper_state_change(dst, master, + netif_oper_up(master)); } rtnl_unlock(); @@ -1075,22 +1087,20 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst) static void dsa_tree_teardown_master(struct dsa_switch_tree *dst) { - struct dsa_port *dp; + struct dsa_port *cpu_dp; rtnl_lock(); - 
list_for_each_entry(dp, &dst->ports, list) { - if (dsa_port_is_cpu(dp)) { - struct net_device *master = dp->master; + dsa_tree_for_each_cpu_port(cpu_dp, dst) { + struct net_device *master = cpu_dp->master; - /* Synthesizing an "admin down" state is sufficient for - * the switches to get a notification if the master is - * currently up and running. - */ - dsa_tree_master_admin_state_change(dst, master, false); + /* Synthesizing an "admin down" state is sufficient for + * the switches to get a notification if the master is + * currently up and running. + */ + dsa_tree_master_admin_state_change(dst, master, false); - dsa_master_teardown(master); - } + dsa_master_teardown(master); } rtnl_unlock(); @@ -1238,7 +1248,6 @@ out_disconnect: * they would have formed disjoint trees (different "dsa,member" values). */ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, - struct net_device *master, const struct dsa_device_ops *tag_ops, const struct dsa_device_ops *old_tag_ops) { @@ -1254,14 +1263,11 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, * attempts to change the tagging protocol. If we ever lift the IFF_UP * restriction, there needs to be another mutex which serializes this. */ - if (master->flags & IFF_UP) - goto out_unlock; - list_for_each_entry(dp, &dst->ports, list) { - if (!dsa_port_is_user(dp)) - continue; + if (dsa_port_is_cpu(dp) && (dp->master->flags & IFF_UP)) + goto out_unlock; - if (dp->slave->flags & IFF_UP) + if (dsa_port_is_user(dp) && (dp->slave->flags & IFF_UP)) goto out_unlock; } diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index d9722e49864b..614fbba8fe39 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -285,8 +285,8 @@ int dsa_port_mrp_add_ring_role(const struct dsa_port *dp, int dsa_port_mrp_del_ring_role(const struct dsa_port *dp, const struct switchdev_obj_ring_role_mrp *mrp); int dsa_port_phylink_create(struct dsa_port *dp); -int dsa_port_link_register_of(struct dsa_port *dp); -void dsa_port_link_unregister_of(struct dsa_port *dp); +int dsa_shared_port_link_register_of(struct dsa_port *dp); +void dsa_shared_port_link_unregister_of(struct dsa_port *dp); int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr); void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr); int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast); @@ -545,7 +545,6 @@ struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst, int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v); int dsa_broadcast(unsigned long e, void *v); int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, - struct net_device *master, const struct dsa_device_ops *tag_ops, const struct dsa_device_ops *old_tag_ops); void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst, diff --git a/net/dsa/master.c b/net/dsa/master.c index 2851e44c4cf0..fb810edc8281 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -58,7 +58,7 @@ static void dsa_master_get_regs(struct net_device *dev, } cpu_info = (struct ethtool_drvinfo *)data; - strlcpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver)); + strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver)); data += sizeof(*cpu_info); cpu_regs = (struct ethtool_regs *)data; data += sizeof(*cpu_regs); @@ -307,7 +307,7 @@ static ssize_t tagging_store(struct device *d, struct device_attribute *attr, */ goto out; - err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, dev, new_tag_ops, + err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops, old_tag_ops); if (err) { /* On 
failure the old tagger is restored, so we don't need the diff --git a/net/dsa/port.c b/net/dsa/port.c index 2dd76eb1621c..7afc35db0c29 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -145,11 +145,14 @@ int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age) static void dsa_port_set_state_now(struct dsa_port *dp, u8 state, bool do_fast_age) { + struct dsa_switch *ds = dp->ds; int err; err = dsa_port_set_state(dp, state, do_fast_age); - if (err) - pr_err("DSA: failed to set STP state %u (%d)\n", state, err); + if (err && err != -EOPNOTSUPP) { + dev_err(ds->dev, "port %d failed to set STP state %u: %pe\n", + dp->index, state, ERR_PTR(err)); + } } int dsa_port_set_mst_state(struct dsa_port *dp, @@ -1552,7 +1555,7 @@ int dsa_port_phylink_create(struct dsa_port *dp) return 0; } -static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable) +static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable) { struct dsa_switch *ds = dp->ds; struct phy_device *phydev; @@ -1590,7 +1593,7 @@ err_put_dev: return err; } -static int dsa_port_fixed_link_register_of(struct dsa_port *dp) +static int dsa_shared_port_fixed_link_register_of(struct dsa_port *dp) { struct device_node *dn = dp->dn; struct dsa_switch *ds = dp->ds; @@ -1624,7 +1627,7 @@ static int dsa_port_fixed_link_register_of(struct dsa_port *dp) return 0; } -static int dsa_port_phylink_register(struct dsa_port *dp) +static int dsa_shared_port_phylink_register(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; struct device_node *port_dn = dp->dn; @@ -1650,22 +1653,184 @@ err_phy_connect: return err; } -int dsa_port_link_register_of(struct dsa_port *dp) +/* During the initial DSA driver migration to OF, port nodes were sometimes + * added to device trees with no indication of how they should operate from a + * link management perspective (phy-handle, fixed-link, etc). Additionally, the + * phy-mode may be absent. The interpretation of these port OF nodes depends on + * their type. + * + * User ports with no phy-handle or fixed-link are expected to connect to an + * internal PHY located on the ds->slave_mii_bus at an MDIO address equal to + * the port number. This description is still actively supported. + * + * Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to + * operate at the maximum speed that their phy-mode is capable of. If the + * phy-mode is absent, they are expected to operate using the phy-mode + * supported by the port that gives the highest link speed. It is unspecified + * if the port should use flow control or not, half duplex or full duplex, or + * if the phy-mode is a SERDES link, whether in-band autoneg is expected to be + * enabled or not. + * + * In the latter case of shared ports, omitting the link management description + * from the firmware node is deprecated and strongly discouraged. DSA uses + * phylink, which rejects the firmware nodes of these ports for lacking + * required properties. + * + * For switches in this table, DSA will skip enforcing validation and will + * later omit registering a phylink instance for the shared ports, if they lack + * a fixed-link, a phy-handle, or a managed = "in-band-status" property. + * It becomes the responsibility of the driver to ensure that these ports + * operate at the maximum speed (whatever this means) and will interoperate + * with the DSA master or other cascade port, since phylink methods will not be + * invoked for them. 
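The gating described above reduces to two questions: does the shared port's OF node carry a usable link description, and is the switch listed in the workaround table. A condensed sketch of that decision is below, using only OF helpers that the hunk itself relies on (of_get_phy_mode, of_phy_is_fixed_link, of_parse_phandle, of_device_compatible_match); the compatible string "vendor,legacy-switch" and the function names are illustrative placeholders, not part of this patch.

/* Illustrative sketch only; the real table is dsa_switches_apply_workarounds
 * and the real validation is dsa_shared_port_validate_of() further down.
 */
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>

static const char * const legacy_compatibles[] = {
	"vendor,legacy-switch",		/* placeholder compatible */
	NULL,
};

static bool shared_port_lacks_link_description(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn, *phy_np;
	phy_interface_t mode;

	if (of_get_phy_mode(dn, &mode))
		return true;		/* no phy-mode property at all */

	if (of_phy_is_fixed_link(dn))	/* also true for managed = "in-band-status" */
		return false;

	phy_np = of_parse_phandle(dn, "phy-handle", 0);
	if (phy_np) {
		of_node_put(phy_np);
		return false;
	}

	return true;
}

static bool needs_shared_port_workaround(struct dsa_port *dp)
{
	/* Only switches on the legacy list may keep an undescribed shared
	 * port; everyone else gets -EINVAL and must fix the device tree.
	 */
	return shared_port_lacks_link_description(dp) &&
	       of_device_compatible_match(dp->ds->dev->of_node,
					  legacy_compatibles);
}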
+ * + * If you are considering expanding this table for newly introduced switches, + * think again. It is OK to remove switches from this table if there aren't DT + * blobs in circulation which rely on defaulting the shared ports. + */ +static const char * const dsa_switches_apply_workarounds[] = { +#if IS_ENABLED(CONFIG_NET_DSA_XRS700X) + "arrow,xrs7003e", + "arrow,xrs7003f", + "arrow,xrs7004e", + "arrow,xrs7004f", +#endif +#if IS_ENABLED(CONFIG_B53) + "brcm,bcm5325", + "brcm,bcm53115", + "brcm,bcm53125", + "brcm,bcm53128", + "brcm,bcm5365", + "brcm,bcm5389", + "brcm,bcm5395", + "brcm,bcm5397", + "brcm,bcm5398", + "brcm,bcm53010-srab", + "brcm,bcm53011-srab", + "brcm,bcm53012-srab", + "brcm,bcm53018-srab", + "brcm,bcm53019-srab", + "brcm,bcm5301x-srab", + "brcm,bcm11360-srab", + "brcm,bcm58522-srab", + "brcm,bcm58525-srab", + "brcm,bcm58535-srab", + "brcm,bcm58622-srab", + "brcm,bcm58623-srab", + "brcm,bcm58625-srab", + "brcm,bcm88312-srab", + "brcm,cygnus-srab", + "brcm,nsp-srab", + "brcm,omega-srab", + "brcm,bcm3384-switch", + "brcm,bcm6328-switch", + "brcm,bcm6368-switch", + "brcm,bcm63xx-switch", +#endif +#if IS_ENABLED(CONFIG_NET_DSA_BCM_SF2) + "brcm,bcm7445-switch-v4.0", + "brcm,bcm7278-switch-v4.0", + "brcm,bcm7278-switch-v4.8", +#endif +#if IS_ENABLED(CONFIG_NET_DSA_LANTIQ_GSWIP) + "lantiq,xrx200-gswip", + "lantiq,xrx300-gswip", + "lantiq,xrx330-gswip", +#endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6060) + "marvell,mv88e6060", +#endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6XXX) + "marvell,mv88e6085", + "marvell,mv88e6190", + "marvell,mv88e6250", +#endif +#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) + "microchip,ksz8765", + "microchip,ksz8794", + "microchip,ksz8795", + "microchip,ksz8863", + "microchip,ksz8873", + "microchip,ksz9477", + "microchip,ksz9897", + "microchip,ksz9893", + "microchip,ksz9563", + "microchip,ksz8563", + "microchip,ksz9567", +#endif +#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) + "smsc,lan9303-mdio", +#endif +#if IS_ENABLED(CONFIG_NET_DSA_SMSC_LAN9303_I2C) + "smsc,lan9303-i2c", +#endif + NULL, +}; + +static void dsa_shared_port_validate_of(struct dsa_port *dp, + bool *missing_phy_mode, + bool *missing_link_description) { + struct device_node *dn = dp->dn, *phy_np; struct dsa_switch *ds = dp->ds; - struct device_node *phy_np; + phy_interface_t mode; + + *missing_phy_mode = false; + *missing_link_description = false; + + if (of_get_phy_mode(dn, &mode)) { + *missing_phy_mode = true; + dev_err(ds->dev, + "OF node %pOF of %s port %d lacks the required \"phy-mode\" property\n", + dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index); + } + + /* Note: of_phy_is_fixed_link() also returns true for + * managed = "in-band-status" + */ + if (of_phy_is_fixed_link(dn)) + return; + + phy_np = of_parse_phandle(dn, "phy-handle", 0); + if (phy_np) { + of_node_put(phy_np); + return; + } + + *missing_link_description = true; + + dev_err(ds->dev, + "OF node %pOF of %s port %d lacks the required \"phy-handle\", \"fixed-link\" or \"managed\" properties\n", + dn, dsa_port_is_cpu(dp) ? 
"CPU" : "DSA", dp->index); +} + +int dsa_shared_port_link_register_of(struct dsa_port *dp) +{ + struct dsa_switch *ds = dp->ds; + bool missing_link_description; + bool missing_phy_mode; int port = dp->index; + dsa_shared_port_validate_of(dp, &missing_phy_mode, + &missing_link_description); + + if ((missing_phy_mode || missing_link_description) && + !of_device_compatible_match(ds->dev->of_node, + dsa_switches_apply_workarounds)) + return -EINVAL; + if (!ds->ops->adjust_link) { - phy_np = of_parse_phandle(dp->dn, "phy-handle", 0); - if (of_phy_is_fixed_link(dp->dn) || phy_np) { + if (missing_link_description) { + dev_warn(ds->dev, + "Skipping phylink registration for %s port %d\n", + dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index); + } else { if (ds->ops->phylink_mac_link_down) ds->ops->phylink_mac_link_down(ds, port, MLO_AN_FIXED, PHY_INTERFACE_MODE_NA); - of_node_put(phy_np); - return dsa_port_phylink_register(dp); + + return dsa_shared_port_phylink_register(dp); } - of_node_put(phy_np); return 0; } @@ -1673,12 +1838,12 @@ int dsa_port_link_register_of(struct dsa_port *dp) "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n"); if (of_phy_is_fixed_link(dp->dn)) - return dsa_port_fixed_link_register_of(dp); + return dsa_shared_port_fixed_link_register_of(dp); else - return dsa_port_setup_phy_of(dp, true); + return dsa_shared_port_setup_phy_of(dp, true); } -void dsa_port_link_unregister_of(struct dsa_port *dp) +void dsa_shared_port_link_unregister_of(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; @@ -1694,7 +1859,7 @@ void dsa_port_link_unregister_of(struct dsa_port *dp) if (of_phy_is_fixed_link(dp->dn)) of_phy_deregister_fixed_link(dp->dn); else - dsa_port_setup_phy_of(dp, false); + dsa_shared_port_setup_phy_of(dp, false); } int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ad6a6663feeb..345106b1ed78 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -826,9 +826,9 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) static void dsa_slave_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { - strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver)); - strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); + strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver)); + strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); } static int dsa_slave_get_regs_len(struct net_device *dev) @@ -2476,6 +2476,9 @@ static int dsa_slave_changeupper(struct net_device *dev, struct netlink_ext_ack *extack; int err = NOTIFY_DONE; + if (!dsa_slave_dev_check(dev)) + return err; + extack = netdev_notifier_info_to_extack(&info->info); if (netif_is_bridge_master(info->upper_dev)) { @@ -2484,7 +2487,7 @@ static int dsa_slave_changeupper(struct net_device *dev, if (!err) dsa_bridge_mtu_normalization(dp); if (err == -EOPNOTSUPP) { - if (!extack->_msg) + if (extack && !extack->_msg) NL_SET_ERR_MSG_MOD(extack, "Offloading not supported"); err = 0; @@ -2531,6 +2534,9 @@ static int dsa_slave_prechangeupper(struct net_device *dev, { struct dsa_port *dp = dsa_slave_to_port(dev); + if (!dsa_slave_dev_check(dev)) + return NOTIFY_DONE; + if (netif_is_bridge_master(info->upper_dev) && !info->linking) dsa_port_pre_bridge_leave(dp, info->upper_dev); else if (netif_is_lag_master(info->upper_dev) && !info->linking) @@ -2551,6 +2557,9 @@ 
dsa_slave_lag_changeupper(struct net_device *dev, int err = NOTIFY_DONE; struct dsa_port *dp; + if (!netif_is_lag_master(dev)) + return err; + netdev_for_each_lower_dev(dev, lower, iter) { if (!dsa_slave_dev_check(lower)) continue; @@ -2580,6 +2589,9 @@ dsa_slave_lag_prechangeupper(struct net_device *dev, int err = NOTIFY_DONE; struct dsa_port *dp; + if (!netif_is_lag_master(dev)) + return err; + netdev_for_each_lower_dev(dev, lower, iter) { if (!dsa_slave_dev_check(lower)) continue; @@ -2687,6 +2699,75 @@ dsa_slave_prechangeupper_sanity_check(struct net_device *dev, return NOTIFY_DONE; } +static int +dsa_master_prechangeupper_sanity_check(struct net_device *master, + struct netdev_notifier_changeupper_info *info) +{ + struct netlink_ext_ack *extack; + + if (!netdev_uses_dsa(master)) + return NOTIFY_DONE; + + if (!info->linking) + return NOTIFY_DONE; + + /* Allow DSA switch uppers */ + if (dsa_slave_dev_check(info->upper_dev)) + return NOTIFY_DONE; + + /* Allow bridge uppers of DSA masters, subject to further + * restrictions in dsa_bridge_prechangelower_sanity_check() + */ + if (netif_is_bridge_master(info->upper_dev)) + return NOTIFY_DONE; + + extack = netdev_notifier_info_to_extack(&info->info); + + NL_SET_ERR_MSG_MOD(extack, + "DSA master cannot join unknown upper interfaces"); + return notifier_from_errno(-EBUSY); +} + +/* Don't allow bridging of DSA masters, since the bridge layer rx_handler + * prevents the DSA fake ethertype handler to be invoked, so we don't get the + * chance to strip off and parse the DSA switch tag protocol header (the bridge + * layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these + * frames). + * The only case where that would not be an issue is when bridging can already + * be offloaded, such as when the DSA master is itself a DSA or plain switchdev + * port, and is bridged only with other ports from the same hardware device. 
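A rough sketch of what "same hardware device" means in the exception above: both bridge ports must report the same physical switch (port parent) ID. The helper below is an illustration of the idea behind netdev_port_same_parent_id() built on the exported dev_get_port_parent_id(); it is not code from this patch.

#include <linux/netdevice.h>
#include <linux/string.h>

/* Illustration only: true if both netdevs report the same port parent ID,
 * i.e. they are ports of the same physical switch, which is the case where
 * bridging a DSA master can be offloaded instead of done in software.
 */
static bool ports_share_switch(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id ppid_a = {}, ppid_b = {};

	if (dev_get_port_parent_id(a, &ppid_a, true) ||
	    dev_get_port_parent_id(b, &ppid_b, true))
		return false;	/* at least one port exposes no parent ID */

	return ppid_a.id_len == ppid_b.id_len &&
	       !memcmp(ppid_a.id, ppid_b.id, ppid_a.id_len);
}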
+ */ +static int +dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *br = info->upper_dev; + struct netlink_ext_ack *extack; + struct net_device *lower; + struct list_head *iter; + + if (!netif_is_bridge_master(br)) + return NOTIFY_DONE; + + if (!info->linking) + return NOTIFY_DONE; + + extack = netdev_notifier_info_to_extack(&info->info); + + netdev_for_each_lower_dev(br, lower, iter) { + if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower)) + continue; + + if (!netdev_port_same_parent_id(lower, new_lower)) { + NL_SET_ERR_MSG(extack, + "Cannot do software bridging with a DSA master"); + return notifier_from_errno(-EINVAL); + } + } + + return NOTIFY_DONE; +} + static int dsa_slave_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -2698,25 +2779,40 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, int err; err = dsa_slave_prechangeupper_sanity_check(dev, info); - if (err != NOTIFY_DONE) + if (notifier_to_errno(err)) + return err; + + err = dsa_master_prechangeupper_sanity_check(dev, info); + if (notifier_to_errno(err)) return err; - if (dsa_slave_dev_check(dev)) - return dsa_slave_prechangeupper(dev, ptr); + err = dsa_bridge_prechangelower_sanity_check(dev, info); + if (notifier_to_errno(err)) + return err; - if (netif_is_lag_master(dev)) - return dsa_slave_lag_prechangeupper(dev, ptr); + err = dsa_slave_prechangeupper(dev, ptr); + if (notifier_to_errno(err)) + return err; + + err = dsa_slave_lag_prechangeupper(dev, ptr); + if (notifier_to_errno(err)) + return err; break; } - case NETDEV_CHANGEUPPER: - if (dsa_slave_dev_check(dev)) - return dsa_slave_changeupper(dev, ptr); + case NETDEV_CHANGEUPPER: { + int err; - if (netif_is_lag_master(dev)) - return dsa_slave_lag_changeupper(dev, ptr); + err = dsa_slave_changeupper(dev, ptr); + if (notifier_to_errno(err)) + return err; + + err = dsa_slave_lag_changeupper(dev, ptr); + if (notifier_to_errno(err)) + return err; break; + } case NETDEV_CHANGELOWERSTATE: { struct netdev_notifier_changelowerstate_info *info = ptr; struct dsa_port *dp; @@ -2777,6 +2873,9 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, if (!dsa_port_is_user(dp)) continue; + if (dp->cpu_dp != cpu_dp) + continue; + list_add(&dp->slave->close_list, &close_list); } diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 01a427800797..b5f80bc45ceb 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -2,9 +2,7 @@ /* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> * * This module is not a complete tagger implementation. It only provides - * primitives for taggers that rely on 802.1Q VLAN tags to use. The - * dsa_8021q_netdev_ops is registered for API compliance and not used - * directly by callers. + * primitives for taggers that rely on 802.1Q VLAN tags to use. 
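As a heavily simplified sketch of what a tagger built on these 802.1Q primitives does on receive (not a working tagger, and not part of this patch), assuming the long-standing dsa_8021q_rx_switch_id()/dsa_8021q_rx_source_port() helpers from include/linux/dsa/8021q.h: the switch and source port are recovered from bit fields of the VID, then the frame is steered to the matching slave netdev.

#include <linux/if_vlan.h>
#include <linux/dsa/8021q.h>
#include "dsa_priv.h"		/* dsa_master_find_slave() */

static struct sk_buff *example_8021q_rcv(struct sk_buff *skb,
					 struct net_device *master)
{
	int switch_id, src_port;
	u16 vid;

	/* Assumes the 802.1Q tag was moved into the skb metadata by the NIC */
	if (!skb_vlan_tag_present(skb))
		return NULL;

	vid = skb_vlan_tag_get_id(skb);
	switch_id = dsa_8021q_rx_switch_id(vid);
	src_port = dsa_8021q_rx_source_port(vid);

	skb->dev = dsa_master_find_slave(master, switch_id, src_port);
	if (!skb->dev)
		return NULL;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}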
*/ #include <linux/if_vlan.h> #include <linux/dsa/8021q.h> diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c index eb204ad36eee..846588c0070a 100644 --- a/net/dsa/tag_hellcreek.c +++ b/net/dsa/tag_hellcreek.c @@ -45,7 +45,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb, skb->dev = dsa_master_find_slave(dev, 0, port); if (!skb->dev) { - netdev_warn(dev, "Failed to get source port: %d\n", port); + netdev_warn_once(dev, "Failed to get source port: %d\n", port); return NULL; } diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 62b89d6f54fd..e02daa74e833 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -414,12 +414,9 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb) off_eth = skb_gro_offset(skb); hlen = off_eth + sizeof(*eh); - eh = skb_gro_header_fast(skb, off_eth); - if (skb_gro_header_hard(skb, hlen)) { - eh = skb_gro_header_slow(skb, hlen, off_eth); - if (unlikely(!eh)) - goto out; - } + eh = skb_gro_header(skb, hlen, off_eth); + if (unlikely(!eh)) + goto out; flush = 0; diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 6a7308de192d..9298eb3251cb 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -714,16 +714,16 @@ ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp) const struct ethtool_ops *ops = dev->ethtool_ops; rsp->info.cmd = ETHTOOL_GDRVINFO; - strlcpy(rsp->info.version, UTS_RELEASE, sizeof(rsp->info.version)); + strscpy(rsp->info.version, UTS_RELEASE, sizeof(rsp->info.version)); if (ops->get_drvinfo) { ops->get_drvinfo(dev, &rsp->info); } else if (dev->dev.parent && dev->dev.parent->driver) { - strlcpy(rsp->info.bus_info, dev_name(dev->dev.parent), + strscpy(rsp->info.bus_info, dev_name(dev->dev.parent), sizeof(rsp->info.bus_info)); - strlcpy(rsp->info.driver, dev->dev.parent->driver->name, + strscpy(rsp->info.driver, dev->dev.parent->driver->name, sizeof(rsp->info.driver)); } else if (dev->rtnl_link_ops) { - strlcpy(rsp->info.driver, dev->rtnl_link_ops->kind, + strscpy(rsp->info.driver, dev->rtnl_link_ops->kind, sizeof(rsp->info.driver)); } else { return -EOPNOTSUPP; diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index e26079e11835..f4e41a6e0163 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -361,6 +361,9 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) ops = ethnl_default_requests[cmd]; if (WARN_ONCE(!ops, "cmd %u has no ethnl_request_ops\n", cmd)) return -EOPNOTSUPP; + if (GENL_REQ_ATTR_CHECK(info, ops->hdr_attr)) + return -EINVAL; + req_info = kzalloc(ops->req_info_size, GFP_KERNEL); if (!req_info) return -ENOMEM; @@ -1033,6 +1036,7 @@ static struct genl_family ethtool_genl_family __ro_after_init = { .parallel_ops = true, .ops = ethtool_genl_ops, .n_ops = ARRAY_SIZE(ethtool_genl_ops), + .resv_start_op = ETHTOOL_MSG_MODULE_GET + 1, .mcgrps = ethtool_nl_mcgrps, .n_mcgrps = ARRAY_SIZE(ethtool_nl_mcgrps), }; diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c index 2d51b7ab4dc5..3f7de54d85fb 100644 --- a/net/ethtool/strset.c +++ b/net/ethtool/strset.c @@ -167,7 +167,7 @@ static int strset_get_id(const struct nlattr *nest, u32 *val, get_stringset_policy, extack); if (ret < 0) return ret; - if (!tb[ETHTOOL_A_STRINGSET_ID]) + if (NL_REQ_ATTR_CHECK(extack, nest, tb, ETHTOOL_A_STRINGSET_ID)) return -EINVAL; *val = nla_get_u32(tb[ETHTOOL_A_STRINGSET_ID]); diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 1405c037cf7a..7174a9092900 100644 --- a/net/hsr/hsr_netlink.c +++ 
b/net/hsr/hsr_netlink.c @@ -522,6 +522,7 @@ static struct genl_family hsr_genl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = hsr_ops, .n_small_ops = ARRAY_SIZE(hsr_ops), + .resv_start_op = HSR_C_SET_NODE_LIST + 1, .mcgrps = hsr_mcgrps, .n_mcgrps = ARRAY_SIZE(hsr_mcgrps), }; diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c index b07abc38b4b3..7d2de4ee6992 100644 --- a/net/ieee802154/netlink.c +++ b/net/ieee802154/netlink.c @@ -132,6 +132,7 @@ struct genl_family nl802154_family __ro_after_init = { .module = THIS_MODULE, .small_ops = ieee802154_ops, .n_small_ops = ARRAY_SIZE(ieee802154_ops), + .resv_start_op = IEEE802154_LLSEC_DEL_SECLEVEL + 1, .mcgrps = ieee802154_mcgrps, .n_mcgrps = ARRAY_SIZE(ieee802154_mcgrps), }; diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index e0b072aecf0f..38c4f3cb010e 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -2500,6 +2500,7 @@ static struct genl_family nl802154_fam __ro_after_init = { .module = THIS_MODULE, .ops = nl802154_ops, .n_ops = ARRAY_SIZE(nl802154_ops), + .resv_start_op = NL802154_CMD_DEL_SEC_LEVEL + 1, .mcgrps = nl802154_mcgrps, .n_mcgrps = ARRAY_SIZE(nl802154_mcgrps), }; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 3ca0cc467886..d3ab1ae32ef5 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1219,6 +1219,7 @@ EXPORT_SYMBOL(inet_unregister_protosw); static int inet_sk_reselect_saddr(struct sock *sk) { + struct inet_bind_hashbucket *prev_addr_hashbucket; struct inet_sock *inet = inet_sk(sk); __be32 old_saddr = inet->inet_saddr; __be32 daddr = inet->inet_daddr; @@ -1226,6 +1227,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) struct rtable *rt; __be32 new_saddr; struct ip_options_rcu *inet_opt; + int err; inet_opt = rcu_dereference_protected(inet->inet_opt, lockdep_sock_is_held(sk)); @@ -1240,20 +1242,34 @@ static int inet_sk_reselect_saddr(struct sock *sk) if (IS_ERR(rt)) return PTR_ERR(rt); - sk_setup_caps(sk, &rt->dst); - new_saddr = fl4->saddr; - if (new_saddr == old_saddr) + if (new_saddr == old_saddr) { + sk_setup_caps(sk, &rt->dst); return 0; + } + + prev_addr_hashbucket = + inet_bhashfn_portaddr(sk->sk_prot->h.hashinfo, sk, + sock_net(sk), inet->inet_num); + + inet->inet_saddr = inet->inet_rcv_saddr = new_saddr; + + err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk); + if (err) { + inet->inet_saddr = old_saddr; + inet->inet_rcv_saddr = old_saddr; + ip_rt_put(rt); + return err; + } + + sk_setup_caps(sk, &rt->dst); if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) { pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n", __func__, &old_saddr, &new_saddr); } - inet->inet_saddr = inet->inet_rcv_saddr = new_saddr; - /* * XXX The only one ugly spot where we need to * XXX really change the sockets identity after @@ -1448,12 +1464,9 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) off = skb_gro_offset(skb); hlen = off + sizeof(*iph); - iph = skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, hlen)) { - iph = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!iph)) - goto out; - } + iph = skb_gro_header(skb, hlen, off); + if (unlikely(!iph)) + goto out; proto = iph->protocol; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 87c7e3fc5197..4f7237661afb 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1129,7 +1129,7 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev) r->arp_flags = arp_state_to_flags(neigh); read_unlock_bh(&neigh->lock); r->arp_ha.sa_family = 
dev->type; - strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev)); + strscpy(r->arp_dev, dev->name, sizeof(r->arp_dev)); err = 0; } neigh_release(neigh); diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index ffd57523331f..405a8c2aea64 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -42,6 +42,8 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; + } else if (!oif) { + oif = inet->uc_index; } fl4 = &inet->cork.fl.u.ip4; rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr, oif, diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 92b778e423df..e8b9a9202fec 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -2682,23 +2682,27 @@ static __net_init int devinet_init_net(struct net *net) #endif if (!net_eq(net, &init_net)) { - if (IS_ENABLED(CONFIG_SYSCTL) && - sysctl_devconf_inherit_init_net == 3) { + switch (net_inherit_devconf()) { + case 3: /* copy from the current netns */ memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all, sizeof(ipv4_devconf)); memcpy(dflt, current->nsproxy->net_ns->ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt)); - } else if (!IS_ENABLED(CONFIG_SYSCTL) || - sysctl_devconf_inherit_init_net != 2) { - /* inherit == 0 or 1: copy from init_net */ + break; + case 0: + case 1: + /* copy from init_net */ memcpy(all, init_net.ipv4.devconf_all, sizeof(ipv4_devconf)); memcpy(dflt, init_net.ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt)); + break; + case 2: + /* use compiled values */ + break; } - /* else inherit == 2: use compiled values */ } #ifdef CONFIG_SYSCTL diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index f361d3d56be2..943edf4ad4db 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -389,7 +389,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, dev_match = dev_match || (res.type == RTN_LOCAL && dev == net->loopback_dev); if (dev_match) { - ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST; + ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK; return ret; } if (no_addr) @@ -401,7 +401,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, ret = 0; if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) { if (res.type == RTN_UNICAST) - ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST; + ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK; } return ret; diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 025a33c1b04d..0c3c6d0cee29 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -323,12 +323,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk, off = skb_gro_offset(skb); len = off + sizeof(*guehdr); - guehdr = skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, len)) { - guehdr = skb_gro_header_slow(skb, len, off); - if (unlikely(!guehdr)) - goto out; - } + guehdr = skb_gro_header(skb, len, off); + if (unlikely(!guehdr)) + goto out; switch (guehdr->version) { case 0: @@ -931,6 +928,7 @@ static struct genl_family fou_nl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = fou_nl_ops, .n_small_ops = ARRAY_SIZE(fou_nl_ops), + .resv_start_op = FOU_CMD_GET + 1, }; size_t fou_encap_hlen(struct ip_tunnel_encap *e) diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 07073fa35205..2b9cb5398335 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -137,12 +137,9 @@ static struct sk_buff *gre_gro_receive(struct list_head *head, off = skb_gro_offset(skb); hlen = off + sizeof(*greh); - greh = 
skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, hlen)) { - greh = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!greh)) - goto out; - } + greh = skb_gro_header(skb, hlen, off); + if (unlikely(!greh)) + goto out; /* Only support version 0 and K (key), C (csum) flags. Note that * although the support for the S (seq#) flag can be added easily diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index e3ab0cb61624..df0660d818ac 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2529,11 +2529,10 @@ done: err = ip_mc_leave_group(sk, &imr); return err; } - int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, - struct ip_msfilter __user *optval, int __user *optlen) + sockptr_t optval, sockptr_t optlen) { - int err, len, count, copycount; + int err, len, count, copycount, msf_size; struct ip_mreqn imr; __be32 addr = msf->imsf_multiaddr; struct ip_mc_socklist *pmc; @@ -2575,12 +2574,15 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc; len = flex_array_size(psl, sl_addr, copycount); msf->imsf_numsrc = count; - if (put_user(IP_MSFILTER_SIZE(copycount), optlen) || - copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) { + msf_size = IP_MSFILTER_SIZE(copycount); + if (copy_to_sockptr(optlen, &msf_size, sizeof(int)) || + copy_to_sockptr(optval, msf, IP_MSFILTER_SIZE(0))) { return -EFAULT; } if (len && - copy_to_user(&optval->imsf_slist_flex[0], psl->sl_addr, len)) + copy_to_sockptr_offset(optval, + offsetof(struct ip_msfilter, imsf_slist_flex), + psl->sl_addr, len)) return -EFAULT; return 0; done: @@ -2588,7 +2590,7 @@ done: } int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, - struct sockaddr_storage __user *p) + sockptr_t optval, size_t ss_offset) { int i, count, copycount; struct sockaddr_in *psin; @@ -2618,15 +2620,17 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, count = psl ? psl->sl_count : 0; copycount = count < gsf->gf_numsrc ? 
count : gsf->gf_numsrc; gsf->gf_numsrc = count; - for (i = 0; i < copycount; i++, p++) { + for (i = 0; i < copycount; i++) { struct sockaddr_storage ss; psin = (struct sockaddr_in *)&ss; memset(&ss, 0, sizeof(ss)); psin->sin_family = AF_INET; psin->sin_addr.s_addr = psl->sl_addr[i]; - if (copy_to_user(p, &ss, sizeof(ss))) + if (copy_to_sockptr_offset(optval, ss_offset, + &ss, sizeof(ss))) return -EFAULT; + ss_offset += sizeof(ss); } return 0; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index eb31c7158b39..f0038043b661 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -130,14 +130,75 @@ void inet_get_local_port_range(struct net *net, int *low, int *high) } EXPORT_SYMBOL(inet_get_local_port_range); +static bool inet_use_bhash2_on_bind(const struct sock *sk) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); + + return addr_type != IPV6_ADDR_ANY && + addr_type != IPV6_ADDR_MAPPED; + } +#endif + return sk->sk_rcv_saddr != htonl(INADDR_ANY); +} + +static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2, + kuid_t sk_uid, bool relax, + bool reuseport_cb_ok, bool reuseport_ok) +{ + int bound_dev_if2; + + if (sk == sk2) + return false; + + bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if); + + if (!sk->sk_bound_dev_if || !bound_dev_if2 || + sk->sk_bound_dev_if == bound_dev_if2) { + if (sk->sk_reuse && sk2->sk_reuse && + sk2->sk_state != TCP_LISTEN) { + if (!relax || (!reuseport_ok && sk->sk_reuseport && + sk2->sk_reuseport && reuseport_cb_ok && + (sk2->sk_state == TCP_TIME_WAIT || + uid_eq(sk_uid, sock_i_uid(sk2))))) + return true; + } else if (!reuseport_ok || !sk->sk_reuseport || + !sk2->sk_reuseport || !reuseport_cb_ok || + (sk2->sk_state != TCP_TIME_WAIT && + !uid_eq(sk_uid, sock_i_uid(sk2)))) { + return true; + } + } + return false; +} + +static bool inet_bhash2_conflict(const struct sock *sk, + const struct inet_bind2_bucket *tb2, + kuid_t sk_uid, + bool relax, bool reuseport_cb_ok, + bool reuseport_ok) +{ + struct sock *sk2; + + sk_for_each_bound_bhash2(sk2, &tb2->owners) { + if (sk->sk_family == AF_INET && ipv6_only_sock(sk2)) + continue; + + if (inet_bind_conflict(sk, sk2, sk_uid, relax, + reuseport_cb_ok, reuseport_ok)) + return true; + } + return false; +} + +/* This should be called only when the tb and tb2 hashbuckets' locks are held */ static int inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb, + const struct inet_bind2_bucket *tb2, /* may be null */ bool relax, bool reuseport_ok) { - struct sock *sk2; bool reuseport_cb_ok; - bool reuse = sk->sk_reuse; - bool reuseport = !!sk->sk_reuseport; struct sock_reuseport *reuseport_cb; kuid_t uid = sock_i_uid((struct sock *)sk); @@ -150,55 +211,87 @@ static int inet_csk_bind_conflict(const struct sock *sk, /* * Unlike other sk lookup places we do not check * for sk_net here, since _all_ the socks listed - * in tb->owners list belong to the same net - the - * one this bucket belongs to. + * in tb->owners and tb2->owners list belong + * to the same net - the one this bucket belongs to. 
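The user-visible behaviour these conflict checks implement is the classic bind(2) matrix: on the same port, a specific local address and the wildcard address conflict unless both sockets set SO_REUSEADDR and the already-bound socket is not listening (or SO_REUSEPORT with a matching owner). The userspace demo below shows the wildcard-vs-specific case that the bhash2 buckets exist to answer quickly; the port number 50000 is arbitrary.

/* Userspace demo, not kernel code: bind 0.0.0.0:50000, then 127.0.0.1:50000
 * without SO_REUSEADDR; the second bind fails with EADDRINUSE.
 */
#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in any = { .sin_family = AF_INET,
				   .sin_port = htons(50000),
				   .sin_addr.s_addr = htonl(INADDR_ANY) };
	struct sockaddr_in lo = any;
	int s1 = socket(AF_INET, SOCK_STREAM, 0);
	int s2 = socket(AF_INET, SOCK_STREAM, 0);

	lo.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	if (bind(s1, (struct sockaddr *)&any, sizeof(any)))
		perror("bind 0.0.0.0");

	if (bind(s2, (struct sockaddr *)&lo, sizeof(lo)))
		printf("second bind: %s (expected EADDRINUSE)\n",
		       strerror(errno));

	close(s1);
	close(s2);
	return 0;
}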
*/ - sk_for_each_bound(sk2, &tb->owners) { - int bound_dev_if2; + if (!inet_use_bhash2_on_bind(sk)) { + struct sock *sk2; - if (sk == sk2) - continue; - bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if); - if ((!sk->sk_bound_dev_if || - !bound_dev_if2 || - sk->sk_bound_dev_if == bound_dev_if2)) { - if (reuse && sk2->sk_reuse && - sk2->sk_state != TCP_LISTEN) { - if ((!relax || - (!reuseport_ok && - reuseport && sk2->sk_reuseport && - reuseport_cb_ok && - (sk2->sk_state == TCP_TIME_WAIT || - uid_eq(uid, sock_i_uid(sk2))))) && - inet_rcv_saddr_equal(sk, sk2, true)) - break; - } else if (!reuseport_ok || - !reuseport || !sk2->sk_reuseport || - !reuseport_cb_ok || - (sk2->sk_state != TCP_TIME_WAIT && - !uid_eq(uid, sock_i_uid(sk2)))) { - if (inet_rcv_saddr_equal(sk, sk2, true)) - break; - } - } + sk_for_each_bound(sk2, &tb->owners) + if (inet_bind_conflict(sk, sk2, uid, relax, + reuseport_cb_ok, reuseport_ok) && + inet_rcv_saddr_equal(sk, sk2, true)) + return true; + + return false; + } + + /* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if + * ipv4) should have been checked already. We need to do these two + * checks separately because their spinlocks have to be acquired/released + * independently of each other, to prevent possible deadlocks + */ + return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, + reuseport_ok); +} + +/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or + * INADDR_ANY (if ipv4) socket. + * + * Caller must hold bhash hashbucket lock with local bh disabled, to protect + * against concurrent binds on the port for addr any + */ +static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev, + bool relax, bool reuseport_ok) +{ + kuid_t uid = sock_i_uid((struct sock *)sk); + const struct net *net = sock_net(sk); + struct sock_reuseport *reuseport_cb; + struct inet_bind_hashbucket *head2; + struct inet_bind2_bucket *tb2; + bool reuseport_cb_ok; + + rcu_read_lock(); + reuseport_cb = rcu_dereference(sk->sk_reuseport_cb); + /* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */ + reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks); + rcu_read_unlock(); + + head2 = inet_bhash2_addr_any_hashbucket(sk, net, port); + + spin_lock(&head2->lock); + + inet_bind_bucket_for_each(tb2, &head2->chain) + if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk)) + break; + + if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, + reuseport_ok)) { + spin_unlock(&head2->lock); + return true; } - return sk2 != NULL; + + spin_unlock(&head2->lock); + return false; } /* * Find an open port number for the socket. Returns with the - * inet_bind_hashbucket lock held. + * inet_bind_hashbucket locks held if successful. 
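What this open-port search provides to userspace is automatic port selection: binding to port 0 makes the kernel pick a free port from net.ipv4.ip_local_port_range, subject to the same conflict rules as an explicit bind. A minimal userspace illustration (not part of the patch):

/* Userspace demo: ask the kernel for an ephemeral port and print it. */
#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = 0, /* let the kernel choose */
				    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	socklen_t len = sizeof(addr);
	int s = socket(AF_INET, SOCK_STREAM, 0);

	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) == 0 &&
	    getsockname(s, (struct sockaddr *)&addr, &len) == 0)
		printf("kernel chose port %u\n", ntohs(addr.sin_port));

	close(s);
	return 0;
}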
*/ static struct inet_bind_hashbucket * -inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret) +inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret, + struct inet_bind2_bucket **tb2_ret, + struct inet_bind_hashbucket **head2_ret, int *port_ret) { struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; int port = 0; - struct inet_bind_hashbucket *head; + struct inet_bind_hashbucket *head, *head2; struct net *net = sock_net(sk); bool relax = false; int i, low, high, attempt_half; + struct inet_bind2_bucket *tb2; struct inet_bind_bucket *tb; u32 remaining, offset; int l3mdev; @@ -239,11 +332,20 @@ other_parity_scan: head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)]; spin_lock_bh(&head->lock); + if (inet_use_bhash2_on_bind(sk)) { + if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false)) + goto next_port; + } + + head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); + spin_lock(&head2->lock); + tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk); inet_bind_bucket_for_each(tb, &head->chain) - if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && - tb->port == port) { - if (!inet_csk_bind_conflict(sk, tb, relax, false)) + if (inet_bind_bucket_match(tb, net, port, l3mdev)) { + if (!inet_csk_bind_conflict(sk, tb, tb2, + relax, false)) goto success; + spin_unlock(&head2->lock); goto next_port; } tb = NULL; @@ -272,6 +374,8 @@ next_port: success: *port_ret = port; *tb_ret = tb; + *tb2_ret = tb2; + *head2_ret = head2; return head; } @@ -368,53 +472,95 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; int ret = 1, port = snum; - struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); + bool found_port = false, check_bind_conflict = true; + bool bhash_created = false, bhash2_created = false; + struct inet_bind_hashbucket *head, *head2; + struct inet_bind2_bucket *tb2 = NULL; struct inet_bind_bucket *tb = NULL; + bool head2_lock_acquired = false; int l3mdev; l3mdev = inet_sk_bound_l3mdev(sk); if (!port) { - head = inet_csk_find_open_port(sk, &tb, &port); + head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port); if (!head) return ret; + + head2_lock_acquired = true; + + if (tb && tb2) + goto success; + found_port = true; + } else { + head = &hinfo->bhash[inet_bhashfn(net, port, + hinfo->bhash_size)]; + spin_lock_bh(&head->lock); + inet_bind_bucket_for_each(tb, &head->chain) + if (inet_bind_bucket_match(tb, net, port, l3mdev)) + break; + } + + if (!tb) { + tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net, + head, port, l3mdev); if (!tb) - goto tb_not_found; - goto success; + goto fail_unlock; + bhash_created = true; } - head = &hinfo->bhash[inet_bhashfn(net, port, - hinfo->bhash_size)]; - spin_lock_bh(&head->lock); - inet_bind_bucket_for_each(tb, &head->chain) - if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && - tb->port == port) - goto tb_found; -tb_not_found: - tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, - net, head, port, l3mdev); - if (!tb) - goto fail_unlock; -tb_found: - if (!hlist_empty(&tb->owners)) { - if (sk->sk_reuse == SK_FORCE_REUSE) - goto success; - if ((tb->fastreuse > 0 && reuse) || - sk_reuseport_match(tb, sk)) - goto success; - if (inet_csk_bind_conflict(sk, tb, true, true)) + if (!found_port) { + if (!hlist_empty(&tb->owners)) { + if (sk->sk_reuse == SK_FORCE_REUSE || + (tb->fastreuse > 0 && reuse) || + 
sk_reuseport_match(tb, sk)) + check_bind_conflict = false; + } + + if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) { + if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true)) + goto fail_unlock; + } + + head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); + spin_lock(&head2->lock); + head2_lock_acquired = true; + tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk); + } + + if (!tb2) { + tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, + net, head2, port, l3mdev, sk); + if (!tb2) goto fail_unlock; + bhash2_created = true; } + + if (!found_port && check_bind_conflict) { + if (inet_csk_bind_conflict(sk, tb, tb2, true, true)) + goto fail_unlock; + } + success: inet_csk_update_fastreuse(tb, sk); if (!inet_csk(sk)->icsk_bind_hash) - inet_bind_hash(sk, tb, port); + inet_bind_hash(sk, tb, tb2, port); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); + WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2); ret = 0; fail_unlock: + if (ret) { + if (bhash_created) + inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb); + if (bhash2_created) + inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, + tb2); + } + if (head2_lock_acquired) + spin_unlock(&head2->lock); spin_unlock_bh(&head->lock); return ret; } @@ -962,6 +1108,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, inet_sk_set_state(newsk, TCP_SYN_RECV); newicsk->icsk_bind_hash = NULL; + newicsk->icsk_bind2_hash = NULL; inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port; inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index b9d995b5ce24..60d77e234a68 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -92,12 +92,75 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket } } +bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net, + unsigned short port, int l3mdev) +{ + return net_eq(ib_net(tb), net) && tb->port == port && + tb->l3mdev == l3mdev; +} + +static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb, + struct net *net, + struct inet_bind_hashbucket *head, + unsigned short port, int l3mdev, + const struct sock *sk) +{ + write_pnet(&tb->ib_net, net); + tb->l3mdev = l3mdev; + tb->port = port; +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr; + else +#endif + tb->rcv_saddr = sk->sk_rcv_saddr; + INIT_HLIST_HEAD(&tb->owners); + hlist_add_head(&tb->node, &head->chain); +} + +struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep, + struct net *net, + struct inet_bind_hashbucket *head, + unsigned short port, + int l3mdev, + const struct sock *sk) +{ + struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); + + if (tb) + inet_bind2_bucket_init(tb, net, head, port, l3mdev, sk); + + return tb; +} + +/* Caller must hold hashbucket lock for this tb with local BH disabled */ +void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb) +{ + if (hlist_empty(&tb->owners)) { + __hlist_del(&tb->node); + kmem_cache_free(cachep, tb); + } +} + +static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2, + const struct sock *sk) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + return ipv6_addr_equal(&tb2->v6_rcv_saddr, + &sk->sk_v6_rcv_saddr); +#endif + return tb2->rcv_saddr == sk->sk_rcv_saddr; +} + void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, - const unsigned short snum) + struct 
inet_bind2_bucket *tb2, unsigned short port) { - inet_sk(sk)->inet_num = snum; + inet_sk(sk)->inet_num = port; sk_add_bind_node(sk, &tb->owners); inet_csk(sk)->icsk_bind_hash = tb; + sk_add_bind2_node(sk, &tb2->owners); + inet_csk(sk)->icsk_bind2_hash = tb2; } /* @@ -109,6 +172,9 @@ static void __inet_put_port(struct sock *sk) const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num, hashinfo->bhash_size); struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; + struct inet_bind_hashbucket *head2 = + inet_bhashfn_portaddr(hashinfo, sk, sock_net(sk), + inet_sk(sk)->inet_num); struct inet_bind_bucket *tb; spin_lock(&head->lock); @@ -117,6 +183,17 @@ static void __inet_put_port(struct sock *sk) inet_csk(sk)->icsk_bind_hash = NULL; inet_sk(sk)->inet_num = 0; inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); + + spin_lock(&head2->lock); + if (inet_csk(sk)->icsk_bind2_hash) { + struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash; + + __sk_del_bind2_node(sk); + inet_csk(sk)->icsk_bind2_hash = NULL; + inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2); + } + spin_unlock(&head2->lock); + spin_unlock(&head->lock); } @@ -135,12 +212,21 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) const int bhash = inet_bhashfn(sock_net(sk), port, table->bhash_size); struct inet_bind_hashbucket *head = &table->bhash[bhash]; + struct inet_bind_hashbucket *head2 = + inet_bhashfn_portaddr(table, child, sock_net(sk), port); + bool created_inet_bind_bucket = false; + bool update_fastreuse = false; + struct net *net = sock_net(sk); + struct inet_bind2_bucket *tb2; struct inet_bind_bucket *tb; int l3mdev; spin_lock(&head->lock); + spin_lock(&head2->lock); tb = inet_csk(sk)->icsk_bind_hash; - if (unlikely(!tb)) { + tb2 = inet_csk(sk)->icsk_bind2_hash; + if (unlikely(!tb || !tb2)) { + spin_unlock(&head2->lock); spin_unlock(&head->lock); return -ENOENT; } @@ -153,25 +239,49 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) * as that of the child socket. We have to look up or * create a new bind bucket for the child here. 
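With the inet_bind_hash() change above, a bound socket is linked into two owner lists at once (tb->owners in bhash and tb2->owners in bhash2), and __inet_put_port() has to unlink it from both under the respective bucket locks. A minimal userspace sketch of that dual membership, using <sys/queue.h> lists and made-up type names (locking omitted):

#include <stdio.h>
#include <sys/queue.h>

struct sock_model;

struct bucket {				/* stands in for a bind hash bucket */
	LIST_HEAD(, sock_model) owners;
};

struct sock_model {
	int port;
	LIST_ENTRY(sock_model) bind_node;	/* membership in the bhash bucket  */
	LIST_ENTRY(sock_model) bind2_node;	/* membership in the bhash2 bucket */
};

/* Mirrors the shape of the new inet_bind_hash(): one call links the
 * socket into both tables so they cannot disagree about ownership.
 */
static void bind_hash(struct sock_model *sk, struct bucket *tb,
		      struct bucket *tb2, int port)
{
	sk->port = port;
	LIST_INSERT_HEAD(&tb->owners, sk, bind_node);
	LIST_INSERT_HEAD(&tb2->owners, sk, bind2_node);
}

static void put_port(struct sock_model *sk)
{
	LIST_REMOVE(sk, bind_node);	/* drop from the port-only table ...   */
	LIST_REMOVE(sk, bind2_node);	/* ... and from the port+address table */
	sk->port = 0;
}

int main(void)
{
	struct bucket tb, tb2;
	struct sock_model sk;

	LIST_INIT(&tb.owners);
	LIST_INIT(&tb2.owners);
	bind_hash(&sk, &tb, &tb2, 8080);
	printf("bound to %d; buckets empty: %d/%d\n",
	       sk.port, LIST_EMPTY(&tb.owners), LIST_EMPTY(&tb2.owners));
	put_port(&sk);
	printf("released; buckets empty: %d/%d\n",
	       LIST_EMPTY(&tb.owners), LIST_EMPTY(&tb2.owners));
	return 0;
}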
*/ inet_bind_bucket_for_each(tb, &head->chain) { - if (net_eq(ib_net(tb), sock_net(sk)) && - tb->l3mdev == l3mdev && tb->port == port) + if (inet_bind_bucket_match(tb, net, port, l3mdev)) break; } if (!tb) { tb = inet_bind_bucket_create(table->bind_bucket_cachep, - sock_net(sk), head, port, - l3mdev); + net, head, port, l3mdev); if (!tb) { + spin_unlock(&head2->lock); spin_unlock(&head->lock); return -ENOMEM; } + created_inet_bind_bucket = true; + } + update_fastreuse = true; + + goto bhash2_find; + } else if (!inet_bind2_bucket_addr_match(tb2, child)) { + l3mdev = inet_sk_bound_l3mdev(sk); + +bhash2_find: + tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child); + if (!tb2) { + tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep, + net, head2, port, + l3mdev, child); + if (!tb2) + goto error; } - inet_csk_update_fastreuse(tb, child); } - inet_bind_hash(child, tb, port); + if (update_fastreuse) + inet_csk_update_fastreuse(tb, child); + inet_bind_hash(child, tb, tb2, port); + spin_unlock(&head2->lock); spin_unlock(&head->lock); return 0; + +error: + if (created_inet_bind_bucket) + inet_bind_bucket_destroy(table->bind_bucket_cachep, tb); + spin_unlock(&head2->lock); + spin_unlock(&head->lock); + return -ENOMEM; } EXPORT_SYMBOL_GPL(__inet_inherit_port); @@ -675,6 +785,112 @@ void inet_unhash(struct sock *sk) } EXPORT_SYMBOL_GPL(inet_unhash); +static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb, + const struct net *net, unsigned short port, + int l3mdev, const struct sock *sk) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + return net_eq(ib2_net(tb), net) && tb->port == port && + tb->l3mdev == l3mdev && + ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr); + else +#endif + return net_eq(ib2_net(tb), net) && tb->port == port && + tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr; +} + +bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net, + unsigned short port, int l3mdev, const struct sock *sk) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr addr_any = {}; + + if (sk->sk_family == AF_INET6) + return net_eq(ib2_net(tb), net) && tb->port == port && + tb->l3mdev == l3mdev && + ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any); + else +#endif + return net_eq(ib2_net(tb), net) && tb->port == port && + tb->l3mdev == l3mdev && tb->rcv_saddr == 0; +} + +/* The socket's bhash2 hashbucket spinlock must be held when this is called */ +struct inet_bind2_bucket * +inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net, + unsigned short port, int l3mdev, const struct sock *sk) +{ + struct inet_bind2_bucket *bhash2 = NULL; + + inet_bind_bucket_for_each(bhash2, &head->chain) + if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk)) + break; + + return bhash2; +} + +struct inet_bind_hashbucket * +inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port) +{ + struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; + u32 hash; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr addr_any = {}; + + if (sk->sk_family == AF_INET6) + hash = ipv6_portaddr_hash(net, &addr_any, port); + else +#endif + hash = ipv4_portaddr_hash(net, 0, port); + + return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)]; +} + +int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk) +{ + struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; + struct inet_bind2_bucket *tb2, *new_tb2; + int l3mdev = inet_sk_bound_l3mdev(sk); + struct 
inet_bind_hashbucket *head2; + int port = inet_sk(sk)->inet_num; + struct net *net = sock_net(sk); + + /* Allocate a bind2 bucket ahead of time to avoid permanently putting + * the bhash2 table in an inconsistent state if a new tb2 bucket + * allocation fails. + */ + new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC); + if (!new_tb2) + return -ENOMEM; + + head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); + + if (prev_saddr) { + spin_lock_bh(&prev_saddr->lock); + __sk_del_bind2_node(sk); + inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, + inet_csk(sk)->icsk_bind2_hash); + spin_unlock_bh(&prev_saddr->lock); + } + + spin_lock_bh(&head2->lock); + tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk); + if (!tb2) { + tb2 = new_tb2; + inet_bind2_bucket_init(tb2, net, head2, port, l3mdev, sk); + } + sk_add_bind2_node(sk, &tb2->owners); + inet_csk(sk)->icsk_bind2_hash = tb2; + spin_unlock_bh(&head2->lock); + + if (tb2 != new_tb2) + kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2); + + return 0; +} +EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr); + /* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm * Note that we use 32bit integers (vs RFC 'short integers') * because 2^16 is not a multiple of num_ephemeral and this @@ -694,11 +910,13 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *, __u16, struct inet_timewait_sock **)) { struct inet_hashinfo *hinfo = death_row->hashinfo; + struct inet_bind_hashbucket *head, *head2; struct inet_timewait_sock *tw = NULL; - struct inet_bind_hashbucket *head; int port = inet_sk(sk)->inet_num; struct net *net = sock_net(sk); + struct inet_bind2_bucket *tb2; struct inet_bind_bucket *tb; + bool tb_created = false; u32 remaining, offset; int ret, i, low, high; int l3mdev; @@ -755,8 +973,7 @@ other_parity_scan: * the established check is already unique enough. */ inet_bind_bucket_for_each(tb, &head->chain) { - if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && - tb->port == port) { + if (inet_bind_bucket_match(tb, net, port, l3mdev)) { if (tb->fastreuse >= 0 || tb->fastreuseport >= 0) goto next_port; @@ -774,6 +991,7 @@ other_parity_scan: spin_unlock_bh(&head->lock); return -ENOMEM; } + tb_created = true; tb->fastreuse = -1; tb->fastreuseport = -1; goto ok; @@ -789,6 +1007,20 @@ next_port: return -EADDRNOTAVAIL; ok: + /* Find the corresponding tb2 bucket since we need to + * add the socket to the bhash2 table as well + */ + head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); + spin_lock(&head2->lock); + + tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk); + if (!tb2) { + tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net, + head2, port, l3mdev, sk); + if (!tb2) + goto error; + } + /* Here we want to add a little bit of randomness to the next source * port that will be chosen. 
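The comment above in inet_bhash2_update_saddr() describes an allocate-before-teardown pattern: the replacement tb2 is allocated while the old state is still intact, so a failed allocation can simply be reported without leaving the socket hashed in neither table. A generic userspace sketch of the same idiom (the replace_value() helper and its strings are invented for illustration, not kernel code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct slot {
	char *value;
};

/* Allocate the new value first; only tear down the old one once the
 * replacement is guaranteed to exist.  On failure the slot is left
 * exactly as it was, never half-updated.
 */
static int replace_value(struct slot *s, const char *new_value)
{
	char *copy = strdup(new_value);

	if (!copy)
		return -ENOMEM;

	free(s->value);		/* safe: the replacement already exists */
	s->value = copy;
	return 0;
}

int main(void)
{
	struct slot s = { .value = strdup("203.0.113.1") };

	if (replace_value(&s, "198.51.100.7") == 0)
		printf("saddr is now %s\n", s.value);
	free(s.value);
	return 0;
}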
We use a max() with a random here so that * on low contention the randomness is maximal and on high contention @@ -798,7 +1030,10 @@ ok: WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); /* Head lock still held and bh's disabled */ - inet_bind_hash(sk, tb, port); + inet_bind_hash(sk, tb, tb2, port); + + spin_unlock(&head2->lock); + if (sk_unhashed(sk)) { inet_sk(sk)->inet_sport = htons(port); inet_ehash_nolisten(sk, (struct sock *)tw, NULL); @@ -810,6 +1045,13 @@ ok: inet_twsk_deschedule_put(tw); local_bh_enable(); return 0; + +error: + spin_unlock(&head2->lock); + if (tb_created) + inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb); + spin_unlock_bh(&head->lock); + return -ENOMEM; } /* diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 5c58e21f724e..f866d6282b2b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -609,7 +609,7 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src, tunnel_id_to_key32(key->tun_id), key->tos & ~INET_ECN_MASK, dev_net(dev), 0, - skb->mark, skb_get_hash(skb)); + skb->mark, skb_get_hash(skb), key->flow_flags); rt = ip_route_output_key(dev_net(dev), &fl4); if (IS_ERR(rt)) return PTR_ERR(rt); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index d7bd1daf022b..8201cd423ff9 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1109,10 +1109,7 @@ alloc_new_skb: (fraglen + alloc_extra < SKB_MAX_ALLOC || !(rt->dst.dev->features & NETIF_F_SG))) alloclen = fraglen; - else if (!zc) { - alloclen = min_t(int, fraglen, MAX_HEADER); - pagedlen = fraglen - alloclen; - } else { + else { alloclen = fragheaderlen + transhdrlen; pagedlen = datalen - transhdrlen; } @@ -1730,7 +1727,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; - sk->sk_sndbuf = sysctl_wmem_default; + sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); ipc.sockc.mark = fl4.flowi4_mark; err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index a8a323ecbb54..6e19cad154f5 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -772,7 +772,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen) if (optlen < GROUP_FILTER_SIZE(0)) return -EINVAL; - if (optlen > sysctl_optmem_max) + if (optlen > READ_ONCE(sysctl_optmem_max)) return -ENOBUFS; gsf = memdup_sockptr(optval, optlen); @@ -808,7 +808,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, if (optlen < size0) return -EINVAL; - if (optlen > sysctl_optmem_max - 4) + if (optlen > READ_ONCE(sysctl_optmem_max) - 4) return -ENOBUFS; p = kmalloc(optlen + 4, GFP_KERNEL); @@ -888,8 +888,8 @@ static int compat_ip_mcast_join_leave(struct sock *sk, int optname, DEFINE_STATIC_KEY_FALSE(ip4_min_ttl); -static int do_ip_setsockopt(struct sock *sk, int level, int optname, - sockptr_t optval, unsigned int optlen) +int do_ip_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); @@ -944,7 +944,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname, err = 0; if (needs_rtnl) rtnl_lock(); - lock_sock(sk); + sockopt_lock_sock(sk); switch (optname) { case IP_OPTIONS: @@ -1233,7 +1233,7 @@ static int do_ip_setsockopt(struct sock 
*sk, int level, int optname, if (optlen < IP_MSFILTER_SIZE(0)) goto e_inval; - if (optlen > sysctl_optmem_max) { + if (optlen > READ_ONCE(sysctl_optmem_max)) { err = -ENOBUFS; break; } @@ -1333,14 +1333,14 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname, case IP_IPSEC_POLICY: case IP_XFRM_POLICY: err = -EPERM; - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) + if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) break; err = xfrm_user_policy(sk, optname, optval, optlen); break; case IP_TRANSPARENT: - if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && - !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { + if (!!val && !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && + !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { err = -EPERM; break; } @@ -1368,13 +1368,13 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname, err = -ENOPROTOOPT; break; } - release_sock(sk); + sockopt_release_sock(sk); if (needs_rtnl) rtnl_unlock(); return err; e_inval: - release_sock(sk); + sockopt_release_sock(sk); if (needs_rtnl) rtnl_unlock(); return -EINVAL; @@ -1462,37 +1462,37 @@ static bool getsockopt_needs_rtnl(int optname) return false; } -static int ip_get_mcast_msfilter(struct sock *sk, void __user *optval, - int __user *optlen, int len) +static int ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval, + sockptr_t optlen, int len) { const int size0 = offsetof(struct group_filter, gf_slist_flex); - struct group_filter __user *p = optval; struct group_filter gsf; - int num; + int num, gsf_size; int err; if (len < size0) return -EINVAL; - if (copy_from_user(&gsf, p, size0)) + if (copy_from_sockptr(&gsf, optval, size0)) return -EFAULT; num = gsf.gf_numsrc; - err = ip_mc_gsfget(sk, &gsf, p->gf_slist_flex); + err = ip_mc_gsfget(sk, &gsf, optval, + offsetof(struct group_filter, gf_slist_flex)); if (err) return err; if (gsf.gf_numsrc < num) num = gsf.gf_numsrc; - if (put_user(GROUP_FILTER_SIZE(num), optlen) || - copy_to_user(p, &gsf, size0)) + gsf_size = GROUP_FILTER_SIZE(num); + if (copy_to_sockptr(optlen, &gsf_size, sizeof(int)) || + copy_to_sockptr(optval, &gsf, size0)) return -EFAULT; return 0; } -static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, - int __user *optlen, int len) +static int compat_ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval, + sockptr_t optlen, int len) { const int size0 = offsetof(struct compat_group_filter, gf_slist_flex); - struct compat_group_filter __user *p = optval; struct compat_group_filter gf32; struct group_filter gf; int num; @@ -1500,7 +1500,7 @@ static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, if (len < size0) return -EINVAL; - if (copy_from_user(&gf32, p, size0)) + if (copy_from_sockptr(&gf32, optval, size0)) return -EFAULT; gf.gf_interface = gf32.gf_interface; @@ -1508,21 +1508,24 @@ static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, num = gf.gf_numsrc = gf32.gf_numsrc; gf.gf_group = gf32.gf_group; - err = ip_mc_gsfget(sk, &gf, p->gf_slist_flex); + err = ip_mc_gsfget(sk, &gf, optval, + offsetof(struct compat_group_filter, gf_slist_flex)); if (err) return err; if (gf.gf_numsrc < num) num = gf.gf_numsrc; len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32)); - if (put_user(len, optlen) || - put_user(gf.gf_fmode, &p->gf_fmode) || - put_user(gf.gf_numsrc, &p->gf_numsrc)) + if (copy_to_sockptr(optlen, &len, sizeof(int)) || + copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, 
gf_fmode), + &gf.gf_fmode, sizeof(gf.gf_fmode)) || + copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_numsrc), + &gf.gf_numsrc, sizeof(gf.gf_numsrc))) return -EFAULT; return 0; } -static int do_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) +int do_ip_getsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, sockptr_t optlen) { struct inet_sock *inet = inet_sk(sk); bool needs_rtnl = getsockopt_needs_rtnl(optname); @@ -1535,14 +1538,14 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, if (ip_mroute_opt(optname)) return ip_mroute_getsockopt(sk, optname, optval, optlen); - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len < 0) return -EINVAL; if (needs_rtnl) rtnl_lock(); - lock_sock(sk); + sockopt_lock_sock(sk); switch (optname) { case IP_OPTIONS: @@ -1558,17 +1561,19 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, memcpy(optbuf, &inet_opt->opt, sizeof(struct ip_options) + inet_opt->opt.optlen); - release_sock(sk); + sockopt_release_sock(sk); - if (opt->optlen == 0) - return put_user(0, optlen); + if (opt->optlen == 0) { + len = 0; + return copy_to_sockptr(optlen, &len, sizeof(int)); + } ip_options_undo(opt); len = min_t(unsigned int, len, opt->optlen); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, opt->__data, len)) + if (copy_to_sockptr(optval, opt->__data, len)) return -EFAULT; return 0; } @@ -1632,7 +1637,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, dst_release(dst); } if (!val) { - release_sock(sk); + sockopt_release_sock(sk); return -ENOTCONN; } break; @@ -1657,11 +1662,11 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, struct in_addr addr; len = min_t(unsigned int, len, sizeof(struct in_addr)); addr.s_addr = inet->mc_addr; - release_sock(sk); + sockopt_release_sock(sk); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &addr, len)) + if (copy_to_sockptr(optval, &addr, len)) return -EFAULT; return 0; } @@ -1673,12 +1678,11 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, err = -EINVAL; goto out; } - if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) { + if (copy_from_sockptr(&msf, optval, IP_MSFILTER_SIZE(0))) { err = -EFAULT; goto out; } - err = ip_mc_msfget(sk, &msf, - (struct ip_msfilter __user *)optval, optlen); + err = ip_mc_msfget(sk, &msf, optval, optlen); goto out; } case MCAST_MSFILTER: @@ -1695,13 +1699,18 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, { struct msghdr msg; - release_sock(sk); + sockopt_release_sock(sk); if (sk->sk_type != SOCK_STREAM) return -ENOPROTOOPT; - msg.msg_control_is_user = true; - msg.msg_control_user = optval; + if (optval.is_kernel) { + msg.msg_control_is_user = false; + msg.msg_control = optval.kernel; + } else { + msg.msg_control_is_user = true; + msg.msg_control_user = optval.user; + } msg.msg_controllen = len; msg.msg_flags = in_compat_syscall() ? 
MSG_CMSG_COMPAT : 0; @@ -1722,7 +1731,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos); } len -= msg.msg_controllen; - return put_user(len, optlen); + return copy_to_sockptr(optlen, &len, sizeof(int)); } case IP_FREEBIND: val = inet->freebind; @@ -1734,29 +1743,29 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, val = inet->min_ttl; break; default: - release_sock(sk); + sockopt_release_sock(sk); return -ENOPROTOOPT; } - release_sock(sk); + sockopt_release_sock(sk); if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { unsigned char ucval = (unsigned char)val; len = 1; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &ucval, 1)) + if (copy_to_sockptr(optval, &ucval, 1)) return -EFAULT; } else { len = min_t(unsigned int, sizeof(int), len); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, len)) + if (copy_to_sockptr(optval, &val, len)) return -EFAULT; } return 0; out: - release_sock(sk); + sockopt_release_sock(sk); if (needs_rtnl) rtnl_unlock(); return err; @@ -1767,7 +1776,8 @@ int ip_getsockopt(struct sock *sk, int level, { int err; - err = do_ip_getsockopt(sk, level, optname, optval, optlen); + err = do_ip_getsockopt(sk, level, optname, + USER_SOCKPTR(optval), USER_SOCKPTR(optlen)); #if IS_ENABLED(CONFIG_BPFILTER_UMH) if (optname >= BPFILTER_IPT_SO_GET_INFO && diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index e65e948cab9f..019f3b0839c5 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -295,7 +295,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr, iph->saddr, tunnel->parms.o_key, RT_TOS(iph->tos), dev_net(dev), - tunnel->parms.link, tunnel->fwmark, 0); + tunnel->parms.link, tunnel->fwmark, 0, 0); rt = ip_route_output_key(tunnel->net, &fl4); if (!IS_ERR(rt)) { @@ -570,7 +570,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, tunnel_id_to_key32(key->tun_id), RT_TOS(tos), - dev_net(dev), 0, skb->mark, skb_get_hash(skb)); + dev_net(dev), 0, skb->mark, skb_get_hash(skb), + key->flow_flags); if (tunnel->encap.type != TUNNEL_ENCAP_NONE) goto tx_error; @@ -729,7 +730,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr, tunnel->parms.o_key, RT_TOS(tos), dev_net(dev), tunnel->parms.link, - tunnel->fwmark, skb_get_hash(skb)); + tunnel->fwmark, skb_get_hash(skb), 0); if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) goto tx_error; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 73651d17e51f..95eefbe2e142 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1546,7 +1546,8 @@ out: } /* Getsock opt support for the multicast routing system. 
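The ip_sockglue.c hunks above (and the ipmr/ipv6 ones that follow) switch from `char __user *` to sockptr_t, so the same option handler can be fed either a userspace buffer (USER_SOCKPTR() on the syscall path) or a kernel buffer, using copy_from_sockptr()/copy_to_sockptr()/copy_to_sockptr_offset() instead of copy_from_user()/copy_to_user(). Below is a rough userspace model of the idea with made-up names; the real sockptr_t lives in include/linux/sockptr.h and differs in detail:

#include <stdio.h>
#include <string.h>

/* Toy analogue of sockptr_t: one handle that can wrap either a "user"
 * or a "kernel" buffer, so option handlers need only one code path.
 * In the kernel the user case goes through copy_to_user() and friends;
 * here both cases are a plain memcpy().
 */
struct bufptr {
	void *p;
	int is_kernel;
};

static struct bufptr user_bufptr(void *p)
{
	return (struct bufptr){ .p = p, .is_kernel = 0 };
}

static struct bufptr kernel_bufptr(void *p)
{
	return (struct bufptr){ .p = p, .is_kernel = 1 };
}

static int copy_to_bufptr_offset(struct bufptr dst, size_t offset,
				 const void *src, size_t size)
{
	/* real code would use copy_to_user() when !dst.is_kernel */
	memcpy((char *)dst.p + offset, src, size);
	return 0;
}

static int copy_to_bufptr(struct bufptr dst, const void *src, size_t size)
{
	return copy_to_bufptr_offset(dst, 0, src, size);
}

/* One "getsockopt"-style handler serving both kinds of caller. */
static int get_option(struct bufptr optval, struct bufptr optlen)
{
	int val = 42, len = sizeof(int);

	if (copy_to_bufptr(optlen, &len, sizeof(len)))
		return -1;
	return copy_to_bufptr(optval, &val, len);
}

int main(void)
{
	int uval = 0, ulen = 0, kval = 0, klen = 0;

	get_option(user_bufptr(&uval), user_bufptr(&ulen));
	get_option(kernel_bufptr(&kval), kernel_bufptr(&klen));
	printf("user: %d/%d kernel: %d/%d\n", uval, ulen, kval, klen);
	return 0;
}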
*/ -int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) +int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, + sockptr_t optlen) { int olr; int val; @@ -1577,14 +1578,14 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int return -ENOPROTOOPT; } - if (get_user(olr, optlen)) + if (copy_from_sockptr(&olr, optlen, sizeof(int))) return -EFAULT; olr = min_t(unsigned int, olr, sizeof(int)); if (olr < 0) return -EINVAL; - if (put_user(olr, optlen)) + if (copy_to_sockptr(optlen, &olr, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, olr)) + if (copy_to_sockptr(optval, &val, olr)) return -EFAULT; return 0; } diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index a334f0dcc2d0..faee20af4856 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -291,20 +291,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct, exp->expectfn = nf_nat_follow_master; exp->dir = !dir; - /* Try to get same port: if not, try to change it. */ - for (; nated_port != 0; nated_port++) { - int ret; - - exp->tuple.dst.u.tcp.port = htons(nated_port); - ret = nf_ct_expect_related(exp, 0); - if (ret == 0) - break; - else if (ret != -EBUSY) { - nated_port = 0; - break; - } - } - + nated_port = nf_nat_exp_find_port(exp, nated_port); if (nated_port == 0) { /* No port available */ net_notice_ratelimited("nf_nat_h323: out of TCP ports\n"); return 0; @@ -347,20 +334,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, if (info->sig_port[dir] == port) nated_port = ntohs(info->sig_port[!dir]); - /* Try to get same port: if not, try to change it. */ - for (; nated_port != 0; nated_port++) { - int ret; - - exp->tuple.dst.u.tcp.port = htons(nated_port); - ret = nf_ct_expect_related(exp, 0); - if (ret == 0) - break; - else if (ret != -EBUSY) { - nated_port = 0; - break; - } - } - + nated_port = nf_nat_exp_find_port(exp, nated_port); if (nated_port == 0) { /* No port available */ net_notice_ratelimited("nf_nat_q931: out of TCP ports\n"); return 0; @@ -439,20 +413,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, if (info->sig_port[dir] == port) nated_port = ntohs(info->sig_port[!dir]); - /* Try to get same port: if not, try to change it. */ - for (; nated_port != 0; nated_port++) { - int ret; - - exp->tuple.dst.u.tcp.port = htons(nated_port); - ret = nf_ct_expect_related(exp, 0); - if (ret == 0) - break; - else if (ret != -EBUSY) { - nated_port = 0; - break; - } - } - + nated_port = nf_nat_exp_find_port(exp, nated_port); if (nated_port == 0) { /* No port available */ net_notice_ratelimited("nf_nat_ras: out of TCP ports\n"); return 0; @@ -532,20 +493,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, exp->expectfn = ip_nat_callforwarding_expect; exp->dir = !dir; - /* Try to get same port: if not, try to change it. 
*/ - for (nated_port = ntohs(port); nated_port != 0; nated_port++) { - int ret; - - exp->tuple.dst.u.tcp.port = htons(nated_port); - ret = nf_ct_expect_related(exp, 0); - if (ret == 0) - break; - else if (ret != -EBUSY) { - nated_port = 0; - break; - } - } - + nated_port = nf_nat_exp_find_port(exp, ntohs(port)); if (nated_port == 0) { /* No port available */ net_notice_ratelimited("nf_nat_q931: out of TCP ports\n"); return 0; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 970e9a2cca4a..8230be00ecca 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1000,7 +1000,7 @@ new_segment: i = skb_shinfo(skb)->nr_frags; can_coalesce = skb_can_coalesce(skb, i, page, offset); - if (!can_coalesce && i >= sysctl_max_skb_frags) { + if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) { tcp_mark_push(tp, skb); goto new_segment; } @@ -1015,7 +1015,7 @@ new_segment: skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); } else { get_page(page); - skb_fill_page_desc(skb, i, page, offset, copy); + skb_fill_page_desc_noacc(skb, i, page, offset, copy); } if (!(flags & MSG_NO_SHARED_FRAGS)) @@ -1354,7 +1354,7 @@ new_segment: if (!skb_can_coalesce(skb, i, pfrag->page, pfrag->offset)) { - if (i >= sysctl_max_skb_frags) { + if (i >= READ_ONCE(sysctl_max_skb_frags)) { tcp_mark_push(tp, skb); goto new_segment; } @@ -1567,17 +1567,11 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) * calculation of whether or not we must ACK for the sake of * a window update. */ -void tcp_cleanup_rbuf(struct sock *sk, int copied) +static void __tcp_cleanup_rbuf(struct sock *sk, int copied) { struct tcp_sock *tp = tcp_sk(sk); bool time_to_ack = false; - struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); - - WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), - "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", - tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); - if (inet_csk_ack_scheduled(sk)) { const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1623,6 +1617,17 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) tcp_send_ack(sk); } +void tcp_cleanup_rbuf(struct sock *sk, int copied) +{ + struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); + struct tcp_sock *tp = tcp_sk(sk); + + WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), + "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", + tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); + __tcp_cleanup_rbuf(sk, copied); +} + static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) { __skb_unlink(skb, &sk->sk_receive_queue); @@ -1756,34 +1761,26 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) if (sk->sk_state == TCP_LISTEN) return -ENOTCONN; - while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { - int used; - - __skb_unlink(skb, &sk->sk_receive_queue); - used = recv_actor(sk, skb); - if (used <= 0) { - if (!copied) - copied = used; - break; - } - seq += used; - copied += used; + skb = tcp_recv_skb(sk, seq, &offset); + if (!skb) + return 0; - if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { - consume_skb(skb); + __skb_unlink(skb, &sk->sk_receive_queue); + WARN_ON(!skb_set_owner_sk_safe(skb, sk)); + copied = recv_actor(sk, skb); + if (copied >= 0) { + seq += copied; + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ++seq; - break; - } - consume_skb(skb); - break; } + consume_skb(skb); WRITE_ONCE(tp->copied_seq, seq); tcp_rcv_space_adjust(sk); /* Clean up data we have read: This will do ACK frames. 
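The nf_nat_h323 hunks just above replace four copies of the same "try the original port, otherwise walk upwards until nf_ct_expect_related() stops returning -EBUSY" loop with a single nf_nat_exp_find_port() call. The helper's body is not shown in this context; the sketch below is a userspace-compilable reconstruction of what the factored-out loop presumably looks like, with try_reserve() as a stand-in for the real reservation call:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

/* Stand-in for nf_ct_expect_related(): pretend ports below 1030 are
 * already taken (-EBUSY) and everything else can be reserved.
 */
static int try_reserve(uint16_t port)
{
	if (port == 0)
		return -EINVAL;
	return port < 1030 ? -EBUSY : 0;
}

/* Sketch of the consolidated helper: keep the requested port if
 * possible, otherwise probe upwards; 0 means no port was available
 * (including the case where the search wrapped around to 0) or that
 * the reservation failed for a reason other than -EBUSY.
 */
static uint16_t find_free_port(uint16_t port)
{
	for (; port != 0; port++) {
		int ret = try_reserve(port);

		if (ret == 0)
			return port;
		if (ret != -EBUSY)
			return 0;
	}
	return 0;
}

int main(void)
{
	printf("asked for 1024, got %u\n", find_free_port(1024));
	printf("asked for 40000, got %u\n", find_free_port(40000));
	return 0;
}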
*/ if (copied > 0) - tcp_cleanup_rbuf(sk, copied); + __tcp_cleanup_rbuf(sk, copied); return copied; } @@ -3202,7 +3199,7 @@ EXPORT_SYMBOL(tcp_disconnect); static inline bool tcp_can_repair_sock(const struct sock *sk) { - return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && + return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && (sk->sk_state != TCP_LISTEN); } @@ -3479,8 +3476,8 @@ int tcp_set_window_clamp(struct sock *sk, int val) /* * Socket option code for TCP. */ -static int do_tcp_setsockopt(struct sock *sk, int level, int optname, - sockptr_t optval, unsigned int optlen) +int do_tcp_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); @@ -3502,11 +3499,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, return -EFAULT; name[val] = 0; - lock_sock(sk); - err = tcp_set_congestion_control(sk, name, true, - ns_capable(sock_net(sk)->user_ns, - CAP_NET_ADMIN)); - release_sock(sk); + sockopt_lock_sock(sk); + err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), + sockopt_ns_capable(sock_net(sk)->user_ns, + CAP_NET_ADMIN)); + sockopt_release_sock(sk); return err; } case TCP_ULP: { @@ -3522,9 +3519,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, return -EFAULT; name[val] = 0; - lock_sock(sk); + sockopt_lock_sock(sk); err = tcp_set_ulp(sk, name); - release_sock(sk); + sockopt_release_sock(sk); return err; } case TCP_FASTOPEN_KEY: { @@ -3557,7 +3554,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; - lock_sock(sk); + sockopt_lock_sock(sk); switch (optname) { case TCP_MAXSEG: @@ -3779,7 +3776,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, break; } - release_sock(sk); + sockopt_release_sock(sk); return err; } @@ -4043,15 +4040,15 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, return stats; } -static int do_tcp_getsockopt(struct sock *sk, int level, - int optname, char __user *optval, int __user *optlen) +int do_tcp_getsockopt(struct sock *sk, int level, + int optname, sockptr_t optval, sockptr_t optlen) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); int val, len; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, sizeof(int)); @@ -4101,15 +4098,15 @@ static int do_tcp_getsockopt(struct sock *sk, int level, case TCP_INFO: { struct tcp_info info; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; tcp_get_info(sk, &info); len = min_t(unsigned int, len, sizeof(info)); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &info, len)) + if (copy_to_sockptr(optval, &info, len)) return -EFAULT; return 0; } @@ -4119,7 +4116,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, size_t sz = 0; int attr; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; ca_ops = icsk->icsk_ca_ops; @@ -4127,9 +4124,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level, sz = ca_ops->get_info(sk, ~0U, &attr, &info); len = min_t(unsigned int, len, sz); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if 
(copy_to_user(optval, &info, len)) + if (copy_to_sockptr(optval, &info, len)) return -EFAULT; return 0; } @@ -4138,27 +4135,28 @@ static int do_tcp_getsockopt(struct sock *sk, int level, break; case TCP_CONGESTION: - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, TCP_CA_NAME_MAX); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) + if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) return -EFAULT; return 0; case TCP_ULP: - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); if (!icsk->icsk_ulp_ops) { - if (put_user(0, optlen)) + len = 0; + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; return 0; } - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len)) + if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) return -EFAULT; return 0; @@ -4166,15 +4164,15 @@ static int do_tcp_getsockopt(struct sock *sk, int level, u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; unsigned int key_len; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; key_len = tcp_fastopen_get_cipher(net, icsk, key) * TCP_FASTOPEN_KEY_LENGTH; len = min_t(unsigned int, len, key_len); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, key, len)) + if (copy_to_sockptr(optval, key, len)) return -EFAULT; return 0; } @@ -4200,7 +4198,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, case TCP_REPAIR_WINDOW: { struct tcp_repair_window opt; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len != sizeof(opt)) @@ -4215,7 +4213,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, opt.rcv_wnd = tp->rcv_wnd; opt.rcv_wup = tp->rcv_wup; - if (copy_to_user(optval, &opt, len)) + if (copy_to_sockptr(optval, &opt, len)) return -EFAULT; return 0; } @@ -4261,35 +4259,35 @@ static int do_tcp_getsockopt(struct sock *sk, int level, val = tp->save_syn; break; case TCP_SAVED_SYN: { - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; - lock_sock(sk); + sockopt_lock_sock(sk); if (tp->saved_syn) { if (len < tcp_saved_syn_len(tp->saved_syn)) { - if (put_user(tcp_saved_syn_len(tp->saved_syn), - optlen)) { - release_sock(sk); + len = tcp_saved_syn_len(tp->saved_syn); + if (copy_to_sockptr(optlen, &len, sizeof(int))) { + sockopt_release_sock(sk); return -EFAULT; } - release_sock(sk); + sockopt_release_sock(sk); return -EINVAL; } len = tcp_saved_syn_len(tp->saved_syn); - if (put_user(len, optlen)) { - release_sock(sk); + if (copy_to_sockptr(optlen, &len, sizeof(int))) { + sockopt_release_sock(sk); return -EFAULT; } - if (copy_to_user(optval, tp->saved_syn->data, len)) { - release_sock(sk); + if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { + sockopt_release_sock(sk); return -EFAULT; } tcp_saved_syn_free(tp); - release_sock(sk); + sockopt_release_sock(sk); } else { - release_sock(sk); + sockopt_release_sock(sk); len = 0; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; } return 0; @@ -4300,31 +4298,31 @@ static int do_tcp_getsockopt(struct sock *sk, int level, struct 
tcp_zerocopy_receive zc = {}; int err; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len < 0 || len < offsetofend(struct tcp_zerocopy_receive, length)) return -EINVAL; if (unlikely(len > sizeof(zc))) { - err = check_zeroed_user(optval + sizeof(zc), - len - sizeof(zc)); + err = check_zeroed_sockptr(optval, sizeof(zc), + len - sizeof(zc)); if (err < 1) return err == 0 ? -EINVAL : err; len = sizeof(zc); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; } - if (copy_from_user(&zc, optval, len)) + if (copy_from_sockptr(&zc, optval, len)) return -EFAULT; if (zc.reserved) return -EINVAL; if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) return -EINVAL; - lock_sock(sk); + sockopt_lock_sock(sk); err = tcp_zerocopy_receive(sk, &zc, &tss); err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, &zc, &len, err); - release_sock(sk); + sockopt_release_sock(sk); if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) goto zerocopy_rcv_cmsg; switch (len) { @@ -4354,7 +4352,7 @@ zerocopy_rcv_sk_err: zerocopy_rcv_inq: zc.inq = tcp_inq_hint(sk); zerocopy_rcv_out: - if (!err && copy_to_user(optval, &zc, len)) + if (!err && copy_to_sockptr(optval, &zc, len)) err = -EFAULT; return err; } @@ -4363,9 +4361,9 @@ zerocopy_rcv_out: return -ENOPROTOOPT; } - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, len)) + if (copy_to_sockptr(optval, &val, len)) return -EFAULT; return 0; } @@ -4390,7 +4388,8 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, if (level != SOL_TCP) return icsk->icsk_af_ops->getsockopt(sk, level, optname, optval, optlen); - return do_tcp_getsockopt(sk, level, optname, optval, optlen); + return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), + USER_SOCKPTR(optlen)); } EXPORT_SYMBOL(tcp_getsockopt); @@ -4436,12 +4435,16 @@ static void __tcp_alloc_md5sig_pool(void) * to memory. See smp_rmb() in tcp_get_md5sig_pool() */ smp_wmb(); - tcp_md5sig_pool_populated = true; + /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool() + * and tcp_get_md5sig_pool(). + */ + WRITE_ONCE(tcp_md5sig_pool_populated, true); } bool tcp_alloc_md5sig_pool(void) { - if (unlikely(!tcp_md5sig_pool_populated)) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) { mutex_lock(&tcp_md5sig_mutex); if (!tcp_md5sig_pool_populated) { @@ -4452,7 +4455,8 @@ bool tcp_alloc_md5sig_pool(void) mutex_unlock(&tcp_md5sig_mutex); } - return tcp_md5sig_pool_populated; + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + return READ_ONCE(tcp_md5sig_pool_populated); } EXPORT_SYMBOL(tcp_alloc_md5sig_pool); @@ -4468,7 +4472,8 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) { local_bh_disable(); - if (tcp_md5sig_pool_populated) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (READ_ONCE(tcp_md5sig_pool_populated)) { /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ smp_rmb(); return this_cpu_ptr(&tcp_md5sig_pool); @@ -4739,6 +4744,12 @@ void __init tcp_init(void) SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL); + tcp_hashinfo.bind2_bucket_cachep = + kmem_cache_create("tcp_bind2_bucket", + sizeof(struct inet_bind2_bucket), 0, + SLAB_HWCACHE_ALIGN | SLAB_PANIC | + SLAB_ACCOUNT, + NULL); /* Size and allocate the main established and bind bucket * hash tables. 
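The tcp_md5sig_pool hunks above pair a WRITE_ONCE() of the "populated" flag with READ_ONCE() at every reader while keeping the existing smp_wmb()/smp_rmb() ordering; the overall shape is the classic check, lock, re-check one-time initialisation. Below is a userspace analogue using C11 atomics and a mutex, where release/acquire ordering plays the role of the barriers (this is a model of the pattern, not the kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pool_populated;	/* analogue of tcp_md5sig_pool_populated */
static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pool_data;			/* the state guarded by the flag */

static void populate_pool(void)
{
	pool_data = 1234;
	/* release: publish pool_data before the flag becomes visible */
	atomic_store_explicit(&pool_populated, true, memory_order_release);
}

static int alloc_pool(void)
{
	/* fast path: already initialised, no lock taken */
	if (!atomic_load_explicit(&pool_populated, memory_order_acquire)) {
		pthread_mutex_lock(&pool_mutex);
		/* re-check under the lock so only one thread initialises */
		if (!atomic_load_explicit(&pool_populated,
					  memory_order_relaxed))
			populate_pool();
		pthread_mutex_unlock(&pool_mutex);
	}
	return atomic_load_explicit(&pool_populated, memory_order_acquire);
}

static void *worker(void *arg)
{
	(void)arg;
	if (alloc_pool())
		printf("pool ready, data=%d\n", pool_data);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}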
@@ -4762,7 +4773,7 @@ void __init tcp_init(void) panic("TCP: failed to alloc ehash_locks"); tcp_hashinfo.bhash = alloc_large_system_hash("TCP bind", - sizeof(struct inet_bind_hashbucket), + 2 * sizeof(struct inet_bind_hashbucket), tcp_hashinfo.ehash_mask + 1, 17, /* one slot per 128 KB of memory */ 0, @@ -4771,9 +4782,12 @@ void __init tcp_init(void) 0, 64 * 1024); tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; + tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; for (i = 0; i < tcp_hashinfo.bhash_size; i++) { spin_lock_init(&tcp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); + spin_lock_init(&tcp_hashinfo.bhash2[i].lock); + INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 825b216d11f5..45cc7f1ca296 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -272,8 +272,9 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, * The request socket is not added to the ehash * because it's been added to the accept queue directly. */ + req->timeout = tcp_timeout_init(child); inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS, - TCP_TIMEOUT_INIT, TCP_RTO_MAX); + req->timeout, TCP_RTO_MAX); refcount_set(&req->rsk_refcnt, 2); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ab5f0ea166f1..bc2ea12221f9 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2513,6 +2513,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp) return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); } +static bool tcp_is_non_sack_preventing_reopen(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { + /* Hold old state until something *above* high_seq + * is ACKed. For Reno it is MUST to prevent false + * fast retransmits (RFC2582). SACK TCP is safe. */ + if (!tcp_any_retrans_done(sk)) + tp->retrans_stamp = 0; + return true; + } + return false; +} + /* People celebrate: "We love our President!" */ static bool tcp_try_undo_recovery(struct sock *sk) { @@ -2535,14 +2550,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) } else if (tp->rack.reo_wnd_persist) { tp->rack.reo_wnd_persist--; } - if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { - /* Hold old state until something *above* high_seq - * is ACKed. For Reno it is MUST to prevent false - * fast retransmits (RFC2582). SACK TCP is safe. */ - if (!tcp_any_retrans_done(sk)) - tp->retrans_stamp = 0; + if (tcp_is_non_sack_preventing_reopen(sk)) return true; - } tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; return false; @@ -2578,6 +2587,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; + if (tcp_is_non_sack_preventing_reopen(sk)) + return true; if (frto_undo || tcp_is_sack(tp)) { tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; @@ -3614,12 +3625,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, /* RFC 5961 7 [ACK Throttling] */ static void tcp_send_challenge_ack(struct sock *sk) { - /* unprotected vars, we dont care of overwrites */ - static u32 challenge_timestamp; - static unsigned int challenge_count; struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); - u32 count, now; + u32 count, now, ack_limit; /* First check our per-socket dupack rate limit. 
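In the tcp_init() hunk above, bhash2 is not a separate allocation: alloc_large_system_hash() is asked for 2 * sizeof(struct inet_bind_hashbucket) per slot and bhash2 simply points at the second half of the block, so both tables share one boot-time allocation and have the same number of buckets. The same trick in miniature (userspace, made-up bucket type and count):

#include <stdio.h>
#include <stdlib.h>

struct bucket {			/* stand-in for inet_bind_hashbucket */
	void *chain;
};

int main(void)
{
	size_t n = 8;		/* bucket count, made up */
	/* one zeroed allocation sized for two tables ... */
	struct bucket *bhash = calloc(n, 2 * sizeof(struct bucket));
	/* ... with the second table starting right after the first */
	struct bucket *bhash2;

	if (!bhash)
		return 1;
	bhash2 = bhash + n;
	printf("bhash=%p bhash2=%p (offset %zu buckets)\n",
	       (void *)bhash, (void *)bhash2, (size_t)(bhash2 - bhash));
	free(bhash);
	return 0;
}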
*/ if (__tcp_oow_rate_limited(net, @@ -3627,18 +3635,22 @@ static void tcp_send_challenge_ack(struct sock *sk) &tp->last_oow_ack_time)) return; + ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit); + if (ack_limit == INT_MAX) + goto send_ack; + /* Then check host-wide RFC 5961 rate limit. */ now = jiffies / HZ; - if (now != challenge_timestamp) { - u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit); + if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) { u32 half = (ack_limit + 1) >> 1; - challenge_timestamp = now; - WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit)); + WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now); + WRITE_ONCE(net->ipv4.tcp_challenge_count, half + prandom_u32_max(ack_limit)); } - count = READ_ONCE(challenge_count); + count = READ_ONCE(net->ipv4.tcp_challenge_count); if (count > 0) { - WRITE_ONCE(challenge_count, count - 1); + WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1); +send_ack: NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0c83780dc9bf..01b31f5c7aba 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -199,11 +199,12 @@ static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr, /* This will initiate an outgoing connection. */ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { + struct inet_bind_hashbucket *prev_addr_hashbucket = NULL; struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; + __be32 daddr, nexthop, prev_sk_rcv_saddr; struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); __be16 orig_sport, orig_dport; - __be32 daddr, nexthop; struct flowi4 *fl4; struct rtable *rt; int err; @@ -246,10 +247,28 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (!inet_opt || !inet_opt->opt.srr) daddr = fl4->daddr; - if (!inet->inet_saddr) + if (!inet->inet_saddr) { + if (inet_csk(sk)->icsk_bind2_hash) { + prev_addr_hashbucket = inet_bhashfn_portaddr(&tcp_hashinfo, + sk, sock_net(sk), + inet->inet_num); + prev_sk_rcv_saddr = sk->sk_rcv_saddr; + } inet->inet_saddr = fl4->saddr; + } + sk_rcv_saddr_set(sk, inet->inet_saddr); + if (prev_addr_hashbucket) { + err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk); + if (err) { + inet->inet_saddr = 0; + sk_rcv_saddr_set(sk, prev_sk_rcv_saddr); + ip_rt_put(rt); + return err; + } + } + if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { /* Reset inherited state */ tp->rx_opt.ts_recent = 0; @@ -3139,8 +3158,10 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_tso_win_divisor = 3; /* Default TSQ limit of 16 TSO segments */ net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536; - /* rfc5961 challenge ack rate limiting */ - net->ipv4.sysctl_tcp_challenge_ack_limit = 1000; + + /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. 
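The tcp_send_challenge_ack() hunk above moves the RFC 5961 counters into struct netns_ipv4 (tcp_challenge_timestamp / tcp_challenge_count) and skips the global limit entirely when it is INT_MAX, which the tcp_sk_init() hunk just below makes the per-netns default. The refill policy itself is unchanged: once per second the budget is reset to half the limit plus a random amount up to the limit, and each challenge ACK consumes one token. A userspace model of that limiter (illustration only; the per-socket check, jiffies and locking are omitted):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static unsigned int challenge_count;
static time_t challenge_timestamp;

/* Returns 1 if a challenge ACK may be sent during second 'now'. */
static int challenge_ack_allowed(time_t now, unsigned int ack_limit)
{
	if (ack_limit == INT_MAX)	/* sentinel: global limit disabled */
		return 1;

	if (now != challenge_timestamp) {
		unsigned int half = (ack_limit + 1) / 2;

		challenge_timestamp = now;
		/* new budget: half the limit plus up to 'ack_limit' extra */
		challenge_count = half + (rand() % ack_limit);
	}
	if (challenge_count > 0) {
		challenge_count--;
		return 1;
	}
	return 0;
}

int main(void)
{
	unsigned int sent = 0;

	srand(1);
	for (int i = 0; i < 10000; i++)
		sent += challenge_ack_allowed(0, 1000);
	printf("sent %u of 10000 requested challenge ACKs\n", sent);
	return 0;
}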
*/ + net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX; + net->ipv4.sysctl_tcp_min_tso_segs = 2; net->ipv4.sysctl_tcp_tso_rtt_log = 9; /* 2^9 = 512 usec */ net->ipv4.sysctl_tcp_min_rtt_wlen = 300; diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index d58e672be31c..82f4575f9cd9 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -969,6 +969,7 @@ static struct genl_family tcp_metrics_nl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = tcp_metrics_nl_ops, .n_small_ops = ARRAY_SIZE(tcp_metrics_nl_ops), + .resv_start_op = TCP_METRICS_CMD_DEL + 1, }; static unsigned int tcpmhash_entries; diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 30abde86db45..a844a0d38482 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -195,12 +195,9 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb) off = skb_gro_offset(skb); hlen = off + sizeof(*th); - th = skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, hlen)) { - th = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!th)) - goto out; - } + th = skb_gro_header(skb, hlen, off); + if (unlikely(!th)) + goto out; thlen = th->doff * 4; if (thlen < sizeof(*th)) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 78b654ff421b..290019de766d 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -239,7 +239,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, if (wscale_ok) { /* Set window scaling on max possible window */ space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); - space = max_t(u32, space, sysctl_rmem_max); + space = max_t(u32, space, READ_ONCE(sysctl_rmem_max)); space = min_t(u32, space, *window_clamp); *rcv_wscale = clamp_t(int, ilog2(space) - 15, 0, TCP_MAX_WSCALE); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b4dfb82d6ecb..cb79127f45c3 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -428,7 +428,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req) if (!tp->retrans_stamp) tp->retrans_stamp = tcp_time_stamp(tp); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX); + req->timeout << req->num_timeout, TCP_RTO_MAX); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 34eda973bbf1..cd72158e953a 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -783,6 +783,8 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) */ if (tunnel) { /* ...not for tunnels though: we don't have a sending socket */ + if (udp_sk(sk)->encap_err_rcv) + udp_sk(sk)->encap_err_rcv(sk, skb, iph->ihl << 2); goto out; } if (!inet->recverr) { diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c index 8efaf8c3fe2a..8242c8947340 100644 --- a/net/ipv4/udp_tunnel_core.c +++ b/net/ipv4/udp_tunnel_core.c @@ -72,6 +72,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, udp_sk(sk)->encap_type = cfg->encap_type; udp_sk(sk)->encap_rcv = cfg->encap_rcv; + udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv; udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup; udp_sk(sk)->encap_destroy = cfg->encap_destroy; udp_sk(sk)->gro_receive = cfg->gro_receive; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index b624e3d8c5f0..10ce86bf228e 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3557,11 +3557,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, fallthrough; case NETDEV_UP: case 
NETDEV_CHANGE: - if (dev->flags & IFF_SLAVE) + if (idev && idev->cnf.disable_ipv6) break; - if (idev && idev->cnf.disable_ipv6) + if (dev->flags & IFF_SLAVE) { + if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) && + dev->flags & IFF_UP && dev->flags & IFF_MULTICAST) + ipv6_mc_up(idev); break; + } if (event == NETDEV_UP) { /* restore routes for permanent addresses */ @@ -7162,9 +7166,8 @@ static int __net_init addrconf_init_net(struct net *net) if (!dflt) goto err_alloc_dflt; - if (IS_ENABLED(CONFIG_SYSCTL) && - !net_eq(net, &init_net)) { - switch (sysctl_devconf_inherit_init_net) { + if (!net_eq(net, &init_net)) { + switch (net_inherit_devconf()) { case 1: /* copy from init_net */ memcpy(all, init_net.ipv6.devconf_all, sizeof(ipv6_devconf)); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 2ce0c44d0081..19732b5dce23 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -1057,6 +1057,8 @@ static const struct ipv6_stub ipv6_stub_impl = { static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = { .inet6_bind = __inet6_bind, .udp6_lib_lookup = __udp6_lib_lookup, + .ipv6_setsockopt = do_ipv6_setsockopt, + .ipv6_getsockopt = do_ipv6_getsockopt, }; static int __init inet6_init(void) diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c index 36c58aa257e8..3faf62530d6a 100644 --- a/net/ipv6/ila/ila_main.c +++ b/net/ipv6/ila/ila_main.c @@ -55,6 +55,7 @@ struct genl_family ila_nl_family __ro_after_init = { .module = THIS_MODULE, .ops = ila_nl_ops, .n_ops = ARRAY_SIZE(ila_nl_ops), + .resv_start_op = ILA_CMD_FLUSH + 1, }; static __net_init int ila_init_net(struct net *net) diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c index 1098131ed90c..571f0e4d9cf3 100644 --- a/net/ipv6/ioam6.c +++ b/net/ipv6/ioam6.c @@ -619,6 +619,7 @@ static struct genl_family ioam6_genl_family __ro_after_init = { .parallel_ops = true, .ops = ioam6_genl_ops, .n_ops = ARRAY_SIZE(ioam6_genl_ops), + .resv_start_op = IOAM6_CMD_NS_SET_SCHEMA + 1, .module = THIS_MODULE, }; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 80cb50d459e4..48b4ff0294f6 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -360,7 +360,7 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, if (parms->name[0]) { if (!dev_valid_name(parms->name)) return NULL; - strlcpy(name, parms->name, IFNAMSIZ); + strscpy(name, parms->name, IFNAMSIZ); } else { strcpy(name, "ip6gre%d"); } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index d12dba2dd535..d37a8c97e6de 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -219,12 +219,9 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, off = skb_gro_offset(skb); hlen = off + sizeof(*iph); - iph = skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, hlen)) { - iph = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!iph)) - goto out; - } + iph = skb_gro_header_slow(skb, hlen, off); + if (unlikely(!iph)) + goto out; skb_set_network_header(skb, off); skb_gro_pull(skb, sizeof(*iph)); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f152e51242cb..a60176e913a8 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1648,10 +1648,7 @@ alloc_new_skb: (fraglen + alloc_extra < SKB_MAX_ALLOC || !(rt->dst.dev->features & NETIF_F_SG))) alloclen = fraglen; - else if (!zc) { - alloclen = min_t(int, fraglen, MAX_HEADER); - pagedlen = fraglen - alloclen; - } else { + else { alloclen = fragheaderlen + transhdrlen; pagedlen = datalen - transhdrlen; } diff --git a/net/ipv6/ip6_tunnel.c 
b/net/ipv6/ip6_tunnel.c index 3fda5634578c..9e97f3b4c7e8 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -293,7 +293,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) if (p->name[0]) { if (!dev_valid_name(p->name)) goto failed; - strlcpy(name, p->name, IFNAMSIZ); + strscpy(name, p->name, IFNAMSIZ); } else { sprintf(name, "ip6tnl%%d"); } @@ -1517,7 +1517,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) * ip6_tnl_change() updates the tunnel parameters **/ -static int +static void ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) { t->parms.laddr = p->laddr; @@ -1531,29 +1531,25 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) t->parms.fwmark = p->fwmark; dst_cache_reset(&t->dst_cache); ip6_tnl_link_config(t); - return 0; } -static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) +static void ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) { struct net *net = t->net; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); - int err; ip6_tnl_unlink(ip6n, t); synchronize_net(); - err = ip6_tnl_change(t, p); + ip6_tnl_change(t, p); ip6_tnl_link(ip6n, t); netdev_state_change(t->dev); - return err; } -static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) +static void ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) { /* for default tnl0 device allow to change only the proto */ t->parms.proto = p->proto; netdev_state_change(t->dev); - return 0; } static void @@ -1667,9 +1663,9 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr, } else t = netdev_priv(dev); if (dev == ip6n->fb_tnl_dev) - err = ip6_tnl0_update(t, &p1); + ip6_tnl0_update(t, &p1); else - err = ip6_tnl_update(t, &p1); + ip6_tnl_update(t, &p1); } if (!IS_ERR(t)) { err = 0; @@ -2091,7 +2087,8 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], } else t = netdev_priv(dev); - return ip6_tnl_update(t, &p); + ip6_tnl_update(t, &p); + return 0; } static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 8fe59a79e800..1ddd60d06830 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -211,7 +211,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p if (p->name[0]) { if (!dev_valid_name(p->name)) goto failed; - strlcpy(name, p->name, IFNAMSIZ); + strscpy(name, p->name, IFNAMSIZ); } else { sprintf(name, "ip6_vti%%d"); } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index a9ba41648e36..516e83b52f26 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1827,8 +1827,8 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, * Getsock opt support for the multicast routing system. 
*/ -int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, - int __user *optlen) +int ip6_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, + sockptr_t optlen) { int olr; int val; @@ -1859,16 +1859,16 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, return -ENOPROTOOPT; } - if (get_user(olr, optlen)) + if (copy_from_sockptr(&olr, optlen, sizeof(int))) return -EFAULT; olr = min_t(int, olr, sizeof(int)); if (olr < 0) return -EINVAL; - if (put_user(olr, optlen)) + if (copy_to_sockptr(optlen, &olr, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, olr)) + if (copy_to_sockptr(optval, &val, olr)) return -EFAULT; return 0; } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 222f6bf220ba..2d2f4dd9e5df 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -210,7 +210,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval, if (optlen < GROUP_FILTER_SIZE(0)) return -EINVAL; - if (optlen > sysctl_optmem_max) + if (optlen > READ_ONCE(sysctl_optmem_max)) return -ENOBUFS; gsf = memdup_sockptr(optval, optlen); @@ -244,7 +244,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval, if (optlen < size0) return -EINVAL; - if (optlen > sysctl_optmem_max - 4) + if (optlen > READ_ONCE(sysctl_optmem_max) - 4) return -ENOBUFS; p = kmalloc(optlen + 4, GFP_KERNEL); @@ -327,7 +327,7 @@ static int ipv6_set_opt_hdr(struct sock *sk, int optname, sockptr_t optval, int err; /* hop-by-hop / destination options are privileged option */ - if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) + if (optname != IPV6_RTHDR && !sockopt_ns_capable(net->user_ns, CAP_NET_RAW)) return -EPERM; /* remove any sticky options header with a zero option @@ -391,8 +391,8 @@ sticky_done: return err; } -static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, - sockptr_t optval, unsigned int optlen) +int do_ipv6_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); @@ -417,7 +417,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (needs_rtnl) rtnl_lock(); - lock_sock(sk); + sockopt_lock_sock(sk); switch (optname) { @@ -634,8 +634,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, break; case IPV6_TRANSPARENT: - if (valbool && !ns_capable(net->user_ns, CAP_NET_RAW) && - !ns_capable(net->user_ns, CAP_NET_ADMIN)) { + if (valbool && !sockopt_ns_capable(net->user_ns, CAP_NET_RAW) && + !sockopt_ns_capable(net->user_ns, CAP_NET_ADMIN)) { retv = -EPERM; break; } @@ -946,7 +946,7 @@ done: case IPV6_IPSEC_POLICY: case IPV6_XFRM_POLICY: retv = -EPERM; - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + if (!sockopt_ns_capable(net->user_ns, CAP_NET_ADMIN)) break; retv = xfrm_user_policy(sk, optname, optval, optlen); break; @@ -994,14 +994,14 @@ done: break; } - release_sock(sk); + sockopt_release_sock(sk); if (needs_rtnl) rtnl_unlock(); return retv; e_inval: - release_sock(sk); + sockopt_release_sock(sk); if (needs_rtnl) rtnl_unlock(); return -EINVAL; @@ -1030,7 +1030,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, EXPORT_SYMBOL(ipv6_setsockopt); static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, - int optname, char __user *optval, int len) + int optname, sockptr_t optval, int len) { struct ipv6_opt_hdr *hdr; @@ -1058,56 +1058,53 @@ static int 
ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, return 0; len = min_t(unsigned int, len, ipv6_optlen(hdr)); - if (copy_to_user(optval, hdr, len)) + if (copy_to_sockptr(optval, hdr, len)) return -EFAULT; return len; } -static int ipv6_get_msfilter(struct sock *sk, void __user *optval, - int __user *optlen, int len) +static int ipv6_get_msfilter(struct sock *sk, sockptr_t optval, + sockptr_t optlen, int len) { const int size0 = offsetof(struct group_filter, gf_slist_flex); - struct group_filter __user *p = optval; struct group_filter gsf; int num; int err; if (len < size0) return -EINVAL; - if (copy_from_user(&gsf, p, size0)) + if (copy_from_sockptr(&gsf, optval, size0)) return -EFAULT; if (gsf.gf_group.ss_family != AF_INET6) return -EADDRNOTAVAIL; num = gsf.gf_numsrc; - lock_sock(sk); - err = ip6_mc_msfget(sk, &gsf, p->gf_slist_flex); + sockopt_lock_sock(sk); + err = ip6_mc_msfget(sk, &gsf, optval, size0); if (!err) { if (num > gsf.gf_numsrc) num = gsf.gf_numsrc; - if (put_user(GROUP_FILTER_SIZE(num), optlen) || - copy_to_user(p, &gsf, size0)) + len = GROUP_FILTER_SIZE(num); + if (copy_to_sockptr(optlen, &len, sizeof(int)) || + copy_to_sockptr(optval, &gsf, size0)) err = -EFAULT; } - release_sock(sk); + sockopt_release_sock(sk); return err; } -static int compat_ipv6_get_msfilter(struct sock *sk, void __user *optval, - int __user *optlen) +static int compat_ipv6_get_msfilter(struct sock *sk, sockptr_t optval, + sockptr_t optlen, int len) { const int size0 = offsetof(struct compat_group_filter, gf_slist_flex); - struct compat_group_filter __user *p = optval; struct compat_group_filter gf32; struct group_filter gf; - int len, err; + int err; int num; - if (get_user(len, optlen)) - return -EFAULT; if (len < size0) return -EINVAL; - if (copy_from_user(&gf32, p, size0)) + if (copy_from_sockptr(&gf32, optval, size0)) return -EFAULT; gf.gf_interface = gf32.gf_interface; gf.gf_fmode = gf32.gf_fmode; @@ -1117,23 +1114,25 @@ static int compat_ipv6_get_msfilter(struct sock *sk, void __user *optval, if (gf.gf_group.ss_family != AF_INET6) return -EADDRNOTAVAIL; - lock_sock(sk); - err = ip6_mc_msfget(sk, &gf, p->gf_slist_flex); - release_sock(sk); + sockopt_lock_sock(sk); + err = ip6_mc_msfget(sk, &gf, optval, size0); + sockopt_release_sock(sk); if (err) return err; if (num > gf.gf_numsrc) num = gf.gf_numsrc; len = GROUP_FILTER_SIZE(num) - (sizeof(gf)-sizeof(gf32)); - if (put_user(len, optlen) || - put_user(gf.gf_fmode, &p->gf_fmode) || - put_user(gf.gf_numsrc, &p->gf_numsrc)) + if (copy_to_sockptr(optlen, &len, sizeof(int)) || + copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_fmode), + &gf.gf_fmode, sizeof(gf32.gf_fmode)) || + copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_numsrc), + &gf.gf_numsrc, sizeof(gf32.gf_numsrc))) return -EFAULT; return 0; } -static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen, unsigned int flags) +int do_ipv6_getsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, sockptr_t optlen) { struct ipv6_pinfo *np = inet6_sk(sk); int len; @@ -1142,7 +1141,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, if (ip6_mroute_opt(optname)) return ip6_mroute_getsockopt(sk, optname, optval, optlen); - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; switch (optname) { case IPV6_ADDRFORM: @@ -1156,7 +1155,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, 
break; case MCAST_MSFILTER: if (in_compat_syscall()) - return compat_ipv6_get_msfilter(sk, optval, optlen); + return compat_ipv6_get_msfilter(sk, optval, optlen, len); return ipv6_get_msfilter(sk, optval, optlen, len); case IPV6_2292PKTOPTIONS: { @@ -1166,16 +1165,21 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, if (sk->sk_type != SOCK_STREAM) return -ENOPROTOOPT; - msg.msg_control_user = optval; + if (optval.is_kernel) { + msg.msg_control_is_user = false; + msg.msg_control = optval.kernel; + } else { + msg.msg_control_is_user = true; + msg.msg_control_user = optval.user; + } msg.msg_controllen = len; - msg.msg_flags = flags; - msg.msg_control_is_user = true; + msg.msg_flags = 0; - lock_sock(sk); + sockopt_lock_sock(sk); skb = np->pktoptions; if (skb) ip6_datagram_recv_ctl(sk, &msg, skb); - release_sock(sk); + sockopt_release_sock(sk); if (!skb) { if (np->rxopt.bits.rxinfo) { struct in6_pktinfo src_info; @@ -1212,7 +1216,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, } } len -= msg.msg_controllen; - return put_user(len, optlen); + return copy_to_sockptr(optlen, &len, sizeof(int)); } case IPV6_MTU: { @@ -1264,15 +1268,15 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, { struct ipv6_txoptions *opt; - lock_sock(sk); + sockopt_lock_sock(sk); opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len); - release_sock(sk); + sockopt_release_sock(sk); /* check if ipv6_getsockopt_sticky() returns err code */ if (len < 0) return len; - return put_user(len, optlen); + return copy_to_sockptr(optlen, &len, sizeof(int)); } case IPV6_RECVHOPOPTS: @@ -1326,9 +1330,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, if (!mtuinfo.ip6m_mtu) return -ENOTCONN; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &mtuinfo, len)) + if (copy_to_sockptr(optval, &mtuinfo, len)) return -EFAULT; return 0; @@ -1405,7 +1409,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, if (len < sizeof(freq)) return -EINVAL; - if (copy_from_user(&freq, optval, sizeof(freq))) + if (copy_from_sockptr(&freq, optval, sizeof(freq))) return -EFAULT; if (freq.flr_action != IPV6_FL_A_GET) @@ -1420,9 +1424,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, if (val < 0) return val; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &freq, len)) + if (copy_to_sockptr(optval, &freq, len)) return -EFAULT; return 0; @@ -1474,9 +1478,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, return -ENOPROTOOPT; } len = min_t(unsigned int, sizeof(int), len); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, len)) + if (copy_to_sockptr(optval, &val, len)) return -EFAULT; return 0; } @@ -1492,7 +1496,8 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, if (level != SOL_IPV6) return -ENOPROTOOPT; - err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0); + err = do_ipv6_getsockopt(sk, level, optname, + USER_SOCKPTR(optval), USER_SOCKPTR(optlen)); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 
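The ipv6_sockglue.c and ip6mr.c hunks above switch the getsockopt paths from raw __user pointers to sockptr_t, so the now-exported do_ipv6_setsockopt()/do_ipv6_getsockopt() can also be driven by in-kernel (BPF) callers. A minimal, hypothetical handler showing the resulting copy pattern (example_get_int_opt is illustrative only, not part of the patch):
    /* Illustrative sketch: one code path serves both user pointers (USER_SOCKPTR)
     * and kernel callers, so get_user()/put_user() become sockptr copies.
     */
    static int example_get_int_opt(int val, sockptr_t optval, sockptr_t optlen)
    {
            int len;
            if (copy_from_sockptr(&len, optlen, sizeof(int)))
                    return -EFAULT;
            len = min_t(unsigned int, len, sizeof(int));
            if (copy_to_sockptr(optlen, &len, sizeof(int)))
                    return -EFAULT;
            if (copy_to_sockptr(optval, &val, len))
                    return -EFAULT;
            return 0;
    }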
87c699d57b36..0566ab03ddbe 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -580,7 +580,7 @@ done: } int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, - struct sockaddr_storage __user *p) + sockptr_t optval, size_t ss_offset) { struct ipv6_pinfo *inet6 = inet6_sk(sk); const struct in6_addr *group; @@ -612,8 +612,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; gsf->gf_numsrc = count; - - for (i = 0; i < copycount; i++, p++) { + for (i = 0; i < copycount; i++) { struct sockaddr_in6 *psin6; struct sockaddr_storage ss; @@ -621,8 +620,9 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, memset(&ss, 0, sizeof(ss)); psin6->sin6_family = AF_INET6; psin6->sin6_addr = psl->sl_addr[i]; - if (copy_to_user(p, &ss, sizeof(ss))) + if (copy_to_sockptr_offset(optval, ss_offset, &ss, sizeof(ss))) return -EFAULT; + ss_offset += sizeof(ss); } return 0; } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 98453693e400..3a553494ff16 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1378,6 +1378,9 @@ static void ndisc_router_discovery(struct sk_buff *skb) if (!rt && lifetime) { ND_PRINTK(3, info, "RA: adding default router\n"); + if (neigh) + neigh_release(neigh); + rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr, skb->dev, pref, defrtr_usr_metric); if (!rt) { diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 7dd3629dd19e..38db0064d661 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -86,7 +86,6 @@ static int nf_ct_frag6_sysctl_register(struct net *net) table[1].extra2 = &nf_frag->fqdir->high_thresh; table[2].data = &nf_frag->fqdir->high_thresh; table[2].extra1 = &nf_frag->fqdir->low_thresh; - table[2].extra2 = &nf_frag->fqdir->high_thresh; hdr = register_net_sysctl(net, "net/netfilter", table); if (hdr == NULL) diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 73aaabf0e966..29346a6eec9f 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -191,6 +191,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info) goto out_unlock; } + if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) { + err = -EINVAL; + goto out_unlock; + } + if (hinfo) { err = seg6_hmac_info_del(net, hmackeyid); if (err) @@ -499,6 +504,7 @@ static struct genl_family seg6_genl_family __ro_after_init = { .parallel_ops = true, .ops = seg6_genl_ops, .n_ops = ARRAY_SIZE(seg6_genl_ops), + .resv_start_op = SEG6_CMD_GET_TUNSRC + 1, .module = THIS_MODULE, }; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 6b73b7a5f175..98f1cf40746f 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -254,7 +254,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, if (parms->name[0]) { if (!dev_valid_name(parms->name)) goto failed; - strlcpy(name, parms->name, IFNAMSIZ); + strscpy(name, parms->name, IFNAMSIZ); } else { strcpy(name, "sit%d"); } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index e54eee80ce5f..35013497e407 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -287,8 +287,25 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, } if (!saddr) { + struct inet_bind_hashbucket *prev_addr_hashbucket = NULL; + struct in6_addr prev_v6_rcv_saddr; + + if (icsk->icsk_bind2_hash) { + prev_addr_hashbucket = inet_bhashfn_portaddr(&tcp_hashinfo, + sk, sock_net(sk), + inet->inet_num); + prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr; + } saddr = &fl6.saddr; sk->sk_v6_rcv_saddr = 
*saddr; + + if (prev_addr_hashbucket) { + err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk); + if (err) { + sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr; + goto failure; + } + } } /* set the source address */ @@ -841,7 +858,7 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_md5sig_key *key, int rst, - u8 tclass, __be32 label, u32 priority) + u8 tclass, __be32 label, u32 priority, u32 txhash) { const struct tcphdr *th = tcp_hdr(skb); struct tcphdr *t1; @@ -932,16 +949,16 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 } if (sk) { - if (sk->sk_state == TCP_TIME_WAIT) { + if (sk->sk_state == TCP_TIME_WAIT) mark = inet_twsk(sk)->tw_mark; - /* autoflowlabel relies on buff->hash */ - skb_set_hash(buff, inet_twsk(sk)->tw_txhash, - PKT_HASH_TYPE_L4); - } else { + else mark = sk->sk_mark; - } skb_set_delivery_time(buff, tcp_transmit_time(sk), true); } + if (txhash) { + /* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */ + skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4); + } fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark; fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; @@ -1068,7 +1085,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) } tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, - ipv6_get_dsfield(ipv6h), label, priority); + ipv6_get_dsfield(ipv6h), label, priority, 0); #ifdef CONFIG_TCP_MD5SIG out: @@ -1079,10 +1096,10 @@ out: static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_md5sig_key *key, u8 tclass, - __be32 label, u32 priority) + __be32 label, u32 priority, u32 txhash) { tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, - tclass, label, priority); + tclass, label, priority, txhash); } static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) @@ -1094,7 +1111,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_time_stamp_raw() + tcptw->tw_ts_offset, tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), - tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority); + tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority, + tw->tw_txhash); inet_twsk_put(tw); } @@ -1121,7 +1139,8 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, req->ts_recent, sk->sk_bound_dev_if, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index), - ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority); + ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority, + tcp_rsk(req)->txhash); } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 16c176e7c69a..3366d6a77ff2 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -616,8 +616,11 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } /* Tunnels don't have an application socket: don't pass errors back */ - if (tunnel) + if (tunnel) { + if (udp_sk(sk)->encap_err_rcv) + udp_sk(sk)->encap_err_rcv(sk, skb, offset); goto out; + } if (!np->recverr) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 71899e5a5a11..1215c863e1c4 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1412,12 +1412,6 @@ static int kcm_attach(struct socket *sock, struct 
socket *csock, psock->sk = csk; psock->bpf_prog = prog; - err = strp_init(&psock->strp, csk, &cb); - if (err) { - kmem_cache_free(kcm_psockp, psock); - goto out; - } - write_lock_bh(&csk->sk_callback_lock); /* Check if sk_user_data is already by KCM or someone else. @@ -1425,13 +1419,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock, */ if (csk->sk_user_data) { write_unlock_bh(&csk->sk_callback_lock); - strp_stop(&psock->strp); - strp_done(&psock->strp); kmem_cache_free(kcm_psockp, psock); err = -EALREADY; goto out; } + err = strp_init(&psock->strp, csk, &cb); + if (err) { + write_unlock_bh(&csk->sk_callback_lock); + kmem_cache_free(kcm_psockp, psock); + goto out; + } + psock->save_data_ready = csk->sk_data_ready; psock->save_write_space = csk->sk_write_space; psock->save_state_change = csk->sk_state_change; diff --git a/net/key/af_key.c b/net/key/af_key.c index fda2dcc8a383..c85df5b958d2 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1697,9 +1697,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad pfk->registered |= (1<<hdr->sadb_msg_satype); } + mutex_lock(&pfkey_mutex); xfrm_probe_algs(); supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO); + mutex_unlock(&pfkey_mutex); + if (!supp_skb) { if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC) pfk->registered &= ~(1<<hdr->sadb_msg_satype); diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 6cd97c75445c..f2ae03c40473 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -254,7 +254,7 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, int rc; if (cfg->ifname) { - strlcpy(name, cfg->ifname, IFNAMSIZ); + strscpy(name, cfg->ifname, IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { strcpy(name, L2TP_ETH_DEV_NAME); @@ -314,7 +314,7 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, return rc; } - strlcpy(session->ifname, dev->name, IFNAMSIZ); + strscpy(session->ifname, dev->name, IFNAMSIZ); rcu_assign_pointer(spriv->dev, dev); rtnl_unlock(); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 96eb91be9238..a901fd14fe3b 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -989,6 +989,7 @@ static struct genl_family l2tp_nl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = l2tp_nl_ops, .n_small_ops = ARRAY_SIZE(l2tp_nl_ops), + .resv_start_op = L2TP_CMD_SESSION_GET + 1, .mcgrps = l2tp_multicast_group, .n_mcgrps = ARRAY_SIZE(l2tp_multicast_group), }; diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index af1df3a6bd55..b8de44da1fb8 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -16,6 +16,7 @@ mac80211-y := \ s1g.o \ ibss.o \ iface.o \ + link.o \ rate.o \ michael.o \ tkip.o \ diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index a4f6971b7a19..687b4c878d4a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -23,6 +23,30 @@ #include "mesh.h" #include "wme.h" +static struct ieee80211_link_data * +ieee80211_link_or_deflink(struct ieee80211_sub_if_data *sdata, int link_id, + bool require_valid) +{ + struct ieee80211_link_data *link; + + if (link_id < 0) { + /* + * For keys, if sdata is not an MLD, we might not use + * the return value at all (if it's not a pairwise key), + * so in that case (require_valid==false) don't error. 
+ */ + if (require_valid && sdata->vif.valid_links) + return ERR_PTR(-EINVAL); + + return &sdata->deflink; + } + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + return ERR_PTR(-ENOLINK); + return link; +} + static void ieee80211_set_mu_mimo_follow(struct ieee80211_sub_if_data *sdata, struct vif_params *params) { @@ -202,6 +226,10 @@ static int ieee80211_change_iface(struct wiphy *wiphy, if (params->use_4addr == ifmgd->use_4addr) return 0; + /* FIXME: no support for 4-addr MLO yet */ + if (sdata->vif.valid_links) + return -EOPNOTSUPP; + sdata->u.mgd.use_4addr = params->use_4addr; if (!ifmgd->associated) return 0; @@ -434,10 +462,12 @@ static int ieee80211_set_tx(struct ieee80211_sub_if_data *sdata, } static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, bool pairwise, const u8 *mac_addr, - struct key_params *params) + int link_id, u8 key_idx, bool pairwise, + const u8 *mac_addr, struct key_params *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, link_id, false); struct ieee80211_local *local = sdata->local; struct sta_info *sta = NULL; struct ieee80211_key *key; @@ -446,6 +476,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; + if (IS_ERR(link)) + return PTR_ERR(link); + if (pairwise && params->mode == NL80211_KEY_SET_TX) return ieee80211_set_tx(sdata, mac_addr, key_idx); @@ -454,6 +487,8 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_WEP104: + if (link_id >= 0) + return -EINVAL; if (WARN_ON_ONCE(fips_enabled)) return -EINVAL; break; @@ -466,6 +501,8 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, if (IS_ERR(key)) return PTR_ERR(key); + key->conf.link_id = link_id; + if (pairwise) key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; @@ -527,7 +564,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, break; } - err = ieee80211_key_link(key, sdata, sta); + err = ieee80211_key_link(key, link, sta); out_unlock: mutex_unlock(&local->sta_mtx); @@ -536,18 +573,37 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, } static struct ieee80211_key * -ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata, +ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata, int link_id, u8 key_idx, bool pairwise, const u8 *mac_addr) { struct ieee80211_local *local = sdata->local; + struct ieee80211_link_data *link = &sdata->deflink; struct ieee80211_key *key; - struct sta_info *sta; + + if (link_id >= 0) { + link = rcu_dereference_check(sdata->link[link_id], + lockdep_is_held(&sdata->wdev.mtx)); + if (!link) + return NULL; + } if (mac_addr) { + struct sta_info *sta; + struct link_sta_info *link_sta; + sta = sta_info_get_bss(sdata, mac_addr); if (!sta) return NULL; + if (link_id >= 0) { + link_sta = rcu_dereference_check(sta->link[link_id], + lockdep_is_held(&local->sta_mtx)); + if (!link_sta) + return NULL; + } else { + link_sta = &sta->deflink; + } + if (pairwise && key_idx < NUM_DEFAULT_KEYS) return rcu_dereference_check_key_mtx(local, sta->ptk[key_idx]); @@ -557,7 +613,7 @@ ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata, NUM_DEFAULT_MGMT_KEYS + NUM_DEFAULT_BEACON_KEYS) return rcu_dereference_check_key_mtx(local, - sta->deflink.gtk[key_idx]); + link_sta->gtk[key_idx]); return 
NULL; } @@ -566,7 +622,7 @@ ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata, return rcu_dereference_check_key_mtx(local, sdata->keys[key_idx]); - key = rcu_dereference_check_key_mtx(local, sdata->deflink.gtk[key_idx]); + key = rcu_dereference_check_key_mtx(local, link->gtk[key_idx]); if (key) return key; @@ -578,7 +634,8 @@ ieee80211_lookup_key(struct ieee80211_sub_if_data *sdata, } static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, bool pairwise, const u8 *mac_addr) + int link_id, u8 key_idx, bool pairwise, + const u8 *mac_addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; @@ -588,7 +645,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, mutex_lock(&local->sta_mtx); mutex_lock(&local->key_mtx); - key = ieee80211_lookup_key(sdata, key_idx, pairwise, mac_addr); + key = ieee80211_lookup_key(sdata, link_id, key_idx, pairwise, mac_addr); if (!key) { ret = -ENOENT; goto out_unlock; @@ -605,8 +662,8 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, } static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, bool pairwise, const u8 *mac_addr, - void *cookie, + int link_id, u8 key_idx, bool pairwise, + const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params *params)) { @@ -624,7 +681,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, rcu_read_lock(); - key = ieee80211_lookup_key(sdata, key_idx, pairwise, mac_addr); + key = ieee80211_lookup_key(sdata, link_id, key_idx, pairwise, mac_addr); if (!key) goto out; @@ -711,34 +768,49 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, static int ieee80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, bool uni, + int link_id, u8 key_idx, bool uni, bool multi) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, link_id, false); - ieee80211_set_default_key(sdata, key_idx, uni, multi); + if (IS_ERR(link)) + return PTR_ERR(link); + + ieee80211_set_default_key(link, key_idx, uni, multi); return 0; } static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx) + int link_id, u8 key_idx) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, link_id, true); + + if (IS_ERR(link)) + return PTR_ERR(link); - ieee80211_set_default_mgmt_key(sdata, key_idx); + ieee80211_set_default_mgmt_key(link, key_idx); return 0; } static int ieee80211_config_default_beacon_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx) + int link_id, u8 key_idx) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, link_id, true); + + if (IS_ERR(link)) + return PTR_ERR(link); - ieee80211_set_default_beacon_key(sdata, key_idx); + ieee80211_set_default_beacon_key(link, key_idx); return 0; } @@ -1610,6 +1682,18 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, rcu_dereference_protected(sta->link[link_id], lockdep_is_held(&local->sta_mtx)); + /* + * If there are no changes, then accept a link that doesn't exist, + * unless it's a new link. 
+ */ + if (params->link_id < 0 && !new_link && + !params->link_mac && !params->txpwr_set && + !params->supported_rates_len && + !params->ht_capa && !params->vht_capa && + !params->he_capa && !params->eht_capa && + !params->opmode_notif_used) + return 0; + if (!link || !link_sta) return -EINVAL; @@ -1625,6 +1709,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, params->link_mac)) { return -EINVAL; } + } else if (new_link) { + return -EINVAL; } if (params->txpwr_set) { @@ -2554,7 +2640,8 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - struct ieee80211_link_data *link = &sdata->deflink; + struct ieee80211_link_data *link = + ieee80211_link_or_deflink(sdata, params->link_id, true); struct ieee80211_tx_queue_params p; if (!local->ops->conf_tx) @@ -2563,6 +2650,9 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, if (local->hw.queues < IEEE80211_NUM_ACS) return -EOPNOTSUPP; + if (IS_ERR(link)) + return PTR_ERR(link); + memset(&p, 0, sizeof(p)); p.aifs = params->aifs; p.cw_max = params->cwmax; @@ -3597,9 +3687,6 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata, case NL80211_IFTYPE_MESH_POINT: { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - if (params->chandef.width != sdata->vif.bss_conf.chandef.width) - return -EINVAL; - /* changes into another band are not supported */ if (sdata->vif.bss_conf.chandef.chan->band != params->chandef.chan->band) @@ -3732,7 +3819,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, IEEE80211_QUEUE_STOP_REASON_CSA); cfg80211_ch_switch_started_notify(sdata->dev, - &sdata->deflink.csa_chandef, + &sdata->deflink.csa_chandef, 0, params->count, params->block_tx); if (changed) { @@ -4614,6 +4701,9 @@ static int ieee80211_add_intf_link(struct wiphy *wiphy, { struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + if (wdev->use_4addr) + return -EOPNOTSUPP; + return ieee80211_vif_set_links(sdata, wdev->valid_links); } diff --git a/net/mac80211/eht.c b/net/mac80211/eht.c index 31e20a342f21..18bc6b78b267 100644 --- a/net/mac80211/eht.c +++ b/net/mac80211/eht.c @@ -30,7 +30,9 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata, return; mcs_nss_size = ieee80211_eht_mcs_nss_size(he_cap_ie_elem, - &eht_cap_ie_elem->fixed); + &eht_cap_ie_elem->fixed, + sdata->vif.type == + NL80211_IFTYPE_STATION); eht_total_size += mcs_nss_size; diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c index c2b38370bfb1..a3830d925cc2 100644 --- a/net/mac80211/ethtool.c +++ b/net/mac80211/ethtool.c @@ -83,17 +83,17 @@ static void ieee80211_get_stats(struct net_device *dev, #define ADD_STA_STATS(sta) \ do { \ - data[i++] += (sta)->rx_stats.packets; \ - data[i++] += (sta)->rx_stats.bytes; \ + data[i++] += sinfo.rx_packets; \ + data[i++] += sinfo.rx_bytes; \ data[i++] += (sta)->rx_stats.num_duplicates; \ data[i++] += (sta)->rx_stats.fragments; \ - data[i++] += (sta)->rx_stats.dropped; \ + data[i++] += sinfo.rx_dropped_misc; \ \ data[i++] += sinfo.tx_packets; \ data[i++] += sinfo.tx_bytes; \ data[i++] += (sta)->status_stats.filtered; \ - data[i++] += (sta)->status_stats.retry_failed; \ - data[i++] += (sta)->status_stats.retry_count; \ + data[i++] += sinfo.tx_failed; \ + data[i++] += sinfo.tx_retries; \ } while (0) /* For Managed stations, find the single station based on BSSID diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 
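The mac80211 cfg.c changes above thread the nl80211 link ID through the key and TX-queue ops via the new ieee80211_link_or_deflink() helper. A hypothetical op body showing the expected calling pattern (example_link_op is illustrative; the real callers are the ops in the hunks above):
    /* Illustrative sketch: a negative link_id falls back to the default link,
     * otherwise per-link data is looked up and errors propagate as ERR_PTR
     * values, exactly as in ieee80211_add_key() above.
     */
    static int example_link_op(struct ieee80211_sub_if_data *sdata, int link_id)
    {
            struct ieee80211_link_data *link =
                    ieee80211_link_or_deflink(sdata, link_id, true);
            if (IS_ERR(link))
                    return PTR_ERR(link);
            /* operate on link->conf, link->gtk[], ... */
            return 0;
    }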
d56890e3fabb..9dffc3079588 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -530,6 +530,10 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata) sdata_assert_lock(sdata); + /* When not connected/joined, sending CSA doesn't make sense. */ + if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) + return -ENOLINK; + /* update cfg80211 bss information with the new channel */ if (!is_zero_ether_addr(ifibss->bssid)) { cbss = cfg80211_get_bss(sdata->local->hw.wiphy, @@ -1346,10 +1350,10 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) capability, 0, true); } -static unsigned ibss_setup_channels(struct wiphy *wiphy, - struct ieee80211_channel **channels, - unsigned int channels_max, - u32 center_freq, u32 width) +static unsigned int ibss_setup_channels(struct wiphy *wiphy, + struct ieee80211_channel **channels, + unsigned int channels_max, + u32 center_freq, u32 width) { struct ieee80211_channel *chan = NULL; unsigned int n_chan = 0; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index e192e1ec0261..977aea4467e0 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -213,6 +213,7 @@ struct ieee80211_rx_data { struct ieee80211_sub_if_data *sdata; struct ieee80211_link_data *link; struct sta_info *sta; + struct link_sta_info *link_sta; struct ieee80211_key *key; unsigned int flags; @@ -1810,6 +1811,7 @@ void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, u8 reason, bool tx); void ieee80211_mgd_setup_link(struct ieee80211_link_data *link); void ieee80211_mgd_stop_link(struct ieee80211_link_data *link); +void ieee80211_mgd_set_link_qos_params(struct ieee80211_link_data *link); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); @@ -1929,9 +1931,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata); int ieee80211_add_virtual_monitor(struct ieee80211_local *local); void ieee80211_del_virtual_monitor(struct ieee80211_local *local); -int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, - u16 new_links); - bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata); void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, bool update_bss); @@ -1942,6 +1941,17 @@ static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) return test_bit(SDATA_STATE_RUNNING, &sdata->state); } +/* link handling */ +void ieee80211_link_setup(struct ieee80211_link_data *link); +void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, + int link_id, + struct ieee80211_link_data *link, + struct ieee80211_bss_conf *link_conf); +void ieee80211_link_stop(struct ieee80211_link_data *link); +int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, + u16 new_links); +void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata); + /* tx handling */ void ieee80211_clear_tx_pending(struct ieee80211_local *local); void ieee80211_tx_pending(struct tasklet_struct *t); @@ -2184,6 +2194,8 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, * for that non-transmitting BSS is returned * @link_id: the link ID to parse elements for, if a STA profile * is present in the multi-link element, or -1 to ignore + * @from_ap: frame is received from an AP (currently used only + * for EHT capabilities parsing) */ struct ieee80211_elems_parse_params { const u8 *start; @@ -2193,6 +2205,7 @@ struct ieee80211_elems_parse_params { u32 crc; struct cfg80211_bss *bss; int link_id; + bool from_ap; }; struct 
ieee802_11_elems * @@ -2382,6 +2395,7 @@ u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos, const struct ieee80211_sta_he_cap *he_cap, u8 *end); void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps_mode, struct sk_buff *skb); u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef); int ieee80211_parse_bitrates(enum nl80211_chan_width width, @@ -2407,8 +2421,7 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info, const struct ieee80211_vht_operation *oper, const struct ieee80211_ht_operation *htop, struct cfg80211_chan_def *chandef); -void ieee80211_chandef_eht_oper(struct ieee80211_sub_if_data *sdata, - const struct ieee80211_eht_operation *eht_oper, +void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation *eht_oper, bool support_160, bool support_320, struct cfg80211_chan_def *chandef); bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata, @@ -2513,7 +2526,8 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype); u8 *ieee80211_ie_build_eht_cap(u8 *pos, const struct ieee80211_sta_he_cap *he_cap, const struct ieee80211_sta_eht_cap *eht_cap, - u8 *end); + u8 *end, + bool for_ap); void ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 95b58c5cac07..f99685e2d633 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -200,15 +200,73 @@ static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr, return ret; } +static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_roc_work *roc; + struct ieee80211_local *local = sdata->local; + struct ieee80211_sub_if_data *scan_sdata; + int ret = 0; + + /* To be the most flexible here we want to only limit changing the + * address if the specific interface is doing offchannel work or + * scanning. + */ + if (netif_carrier_ok(sdata->dev)) + return -EBUSY; + + mutex_lock(&local->mtx); + + /* First check no ROC work is happening on this iface */ + list_for_each_entry(roc, &local->roc_list, list) { + if (roc->sdata != sdata) + continue; + + if (roc->started) { + ret = -EBUSY; + goto unlock; + } + } + + /* And if this iface is scanning */ + if (local->scanning) { + scan_sdata = rcu_dereference_protected(local->scan_sdata, + lockdep_is_held(&local->mtx)); + if (sdata == scan_sdata) + ret = -EBUSY; + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + /* More interface types could be added here but changing the + * address while powered makes the most sense in client modes. 
+ */ + break; + default: + return -EOPNOTSUPP; + } + +unlock: + mutex_unlock(&local->mtx); + return ret; +} + static int ieee80211_change_mac(struct net_device *dev, void *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; struct sockaddr *sa = addr; bool check_dup = true; + bool live = false; int ret; - if (ieee80211_sdata_running(sdata)) - return -EBUSY; + if (ieee80211_sdata_running(sdata)) { + ret = ieee80211_can_powered_addr_change(sdata); + if (ret) + return ret; + + live = true; + } if (sdata->vif.type == NL80211_IFTYPE_MONITOR && !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) @@ -218,6 +276,8 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr) if (ret) return ret; + if (live) + drv_remove_interface(local, sdata); ret = eth_mac_addr(dev, sa); if (ret == 0) { @@ -225,6 +285,12 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr) ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr); } + /* Regardless of eth_mac_addr() return we still want to add the + * interface back. This should not fail... + */ + if (live) + WARN_ON(drv_add_interface(local, sdata)); + return ret; } @@ -296,6 +362,11 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, nsdata->vif.type)) return -ENOTUNIQ; + /* No support for VLAN with MLO yet */ + if (iftype == NL80211_IFTYPE_AP_VLAN && + nsdata->wdev.use_4addr) + return -EOPNOTSUPP; + /* * can only add VLANs to enabled APs */ @@ -368,246 +439,6 @@ static int ieee80211_open(struct net_device *dev) return err; } -static void ieee80211_link_setup(struct ieee80211_link_data *link) -{ - if (link->sdata->vif.type == NL80211_IFTYPE_STATION) - ieee80211_mgd_setup_link(link); -} - -static void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, - int link_id, - struct ieee80211_link_data *link, - struct ieee80211_bss_conf *link_conf) -{ - bool deflink = link_id < 0; - - if (link_id < 0) - link_id = 0; - - rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf); - rcu_assign_pointer(sdata->link[link_id], link); - - link->sdata = sdata; - link->link_id = link_id; - link->conf = link_conf; - link_conf->link_id = link_id; - - INIT_WORK(&link->csa_finalize_work, - ieee80211_csa_finalize_work); - INIT_WORK(&link->color_change_finalize_work, - ieee80211_color_change_finalize_work); - INIT_LIST_HEAD(&link->assigned_chanctx_list); - INIT_LIST_HEAD(&link->reserved_chanctx_list); - INIT_DELAYED_WORK(&link->dfs_cac_timer_work, - ieee80211_dfs_cac_timer_work); - - if (!deflink) { - switch (sdata->vif.type) { - case NL80211_IFTYPE_AP: - ether_addr_copy(link_conf->addr, - sdata->wdev.links[link_id].addr); - WARN_ON(!(sdata->wdev.valid_links & BIT(link_id))); - break; - case NL80211_IFTYPE_STATION: - break; - default: - WARN_ON(1); - } - } -} - -static void ieee80211_link_stop(struct ieee80211_link_data *link) -{ - if (link->sdata->vif.type == NL80211_IFTYPE_STATION) - ieee80211_mgd_stop_link(link); - - ieee80211_link_release_channel(link); -} - -struct link_container { - struct ieee80211_link_data data; - struct ieee80211_bss_conf conf; -}; - -static void ieee80211_free_links(struct ieee80211_sub_if_data *sdata, - struct link_container **links) -{ - unsigned int link_id; - - synchronize_rcu(); - - for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { - if (!links[link_id]) - continue; - ieee80211_link_stop(&links[link_id]->data); - kfree(links[link_id]); - } -} - -static int ieee80211_check_dup_link_addrs(struct 
ieee80211_sub_if_data *sdata) -{ - unsigned int i, j; - - for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { - struct ieee80211_link_data *link1; - - link1 = sdata_dereference(sdata->link[i], sdata); - if (!link1) - continue; - for (j = i + 1; j < IEEE80211_MLD_MAX_NUM_LINKS; j++) { - struct ieee80211_link_data *link2; - - link2 = sdata_dereference(sdata->link[j], sdata); - if (!link2) - continue; - - if (ether_addr_equal(link1->conf->addr, - link2->conf->addr)) - return -EALREADY; - } - } - - return 0; -} - -static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, - struct link_container **to_free, - u16 new_links) -{ - u16 old_links = sdata->vif.valid_links; - unsigned long add = new_links & ~old_links; - unsigned long rem = old_links & ~new_links; - unsigned int link_id; - int ret; - struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *link; - struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]; - struct ieee80211_link_data *old_data[IEEE80211_MLD_MAX_NUM_LINKS]; - bool use_deflink = old_links == 0; /* set for error case */ - - sdata_assert_lock(sdata); - - memset(to_free, 0, sizeof(links)); - - if (old_links == new_links) - return 0; - - /* if there were no old links, need to clear the pointers to deflink */ - if (!old_links) - rem |= BIT(0); - - /* allocate new link structures first */ - for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { - link = kzalloc(sizeof(*link), GFP_KERNEL); - if (!link) { - ret = -ENOMEM; - goto free; - } - links[link_id] = link; - } - - /* keep track of the old pointers for the driver */ - BUILD_BUG_ON(sizeof(old) != sizeof(sdata->vif.link_conf)); - memcpy(old, sdata->vif.link_conf, sizeof(old)); - /* and for us in error cases */ - BUILD_BUG_ON(sizeof(old_data) != sizeof(sdata->link)); - memcpy(old_data, sdata->link, sizeof(old_data)); - - /* grab old links to free later */ - for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { - if (rcu_access_pointer(sdata->link[link_id]) != &sdata->deflink) { - /* - * we must have allocated the data through this path so - * we know we can free both at the same time - */ - to_free[link_id] = container_of(rcu_access_pointer(sdata->link[link_id]), - typeof(*links[link_id]), - data); - } - - RCU_INIT_POINTER(sdata->link[link_id], NULL); - RCU_INIT_POINTER(sdata->vif.link_conf[link_id], NULL); - } - - /* link them into data structures */ - for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { - WARN_ON(!use_deflink && - rcu_access_pointer(sdata->link[link_id]) == &sdata->deflink); - - link = links[link_id]; - ieee80211_link_init(sdata, link_id, &link->data, &link->conf); - ieee80211_link_setup(&link->data); - } - - if (new_links == 0) - ieee80211_link_init(sdata, -1, &sdata->deflink, - &sdata->vif.bss_conf); - - sdata->vif.valid_links = new_links; - - ret = ieee80211_check_dup_link_addrs(sdata); - if (!ret) { - /* tell the driver */ - ret = drv_change_vif_links(sdata->local, sdata, - old_links, new_links, - old); - } - - if (ret) { - /* restore config */ - memcpy(sdata->link, old_data, sizeof(old_data)); - memcpy(sdata->vif.link_conf, old, sizeof(old)); - sdata->vif.valid_links = old_links; - /* and free (only) the newly allocated links */ - memset(to_free, 0, sizeof(links)); - goto free; - } - - /* use deflink/bss_conf again if and only if there are no more links */ - use_deflink = new_links == 0; - - goto deinit; -free: - /* if we failed during allocation, only free all */ - for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { - 
kfree(links[link_id]); - links[link_id] = NULL; - } -deinit: - if (use_deflink) - ieee80211_link_init(sdata, -1, &sdata->deflink, - &sdata->vif.bss_conf); - return ret; -} - -int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, - u16 new_links) -{ - struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS]; - int ret; - - ret = ieee80211_vif_update_links(sdata, links, new_links); - ieee80211_free_links(sdata, links); - - return ret; -} - -static void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata) -{ - struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS]; - - /* - * The locking here is different because when we free links - * in the station case we need to be able to cancel_work_sync() - * something that also takes the lock. - */ - - sdata_lock(sdata); - ieee80211_vif_update_links(sdata, links, 0); - sdata_unlock(sdata); - - ieee80211_free_links(sdata, links); -} - static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down) { struct ieee80211_local *local = sdata->local; @@ -2267,7 +2098,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, wdev = &sdata->wdev; sdata->dev = NULL; - strlcpy(sdata->name, name, IFNAMSIZ); + strscpy(sdata->name, name, IFNAMSIZ); ieee80211_assign_perm_addr(local, wdev->address, type); memcpy(sdata->vif.addr, wdev->address, ETH_ALEN); ether_addr_copy(sdata->vif.bss_conf.addr, sdata->vif.addr); @@ -2396,6 +2227,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, sdata->u.mgd.use_4addr = params->use_4addr; ndev->features |= local->hw.netdev_features; + ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; ndev->hw_features |= ndev->features & MAC80211_SUPPORTED_FEATURES_TX; diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 6befb578ed9e..d89ec93b243b 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -344,9 +344,10 @@ static void ieee80211_pairwise_rekey(struct ieee80211_key *old, } } -static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, +static void __ieee80211_set_default_key(struct ieee80211_link_data *link, int idx, bool uni, bool multi) { + struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_key *key = NULL; assert_key_lock(sdata->local); @@ -354,7 +355,7 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, if (idx >= 0 && idx < NUM_DEFAULT_KEYS) { key = key_mtx_dereference(sdata->local, sdata->keys[idx]); if (!key) - key = key_mtx_dereference(sdata->local, sdata->deflink.gtk[idx]); + key = key_mtx_dereference(sdata->local, link->gtk[idx]); } if (uni) { @@ -365,47 +366,48 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, } if (multi) - rcu_assign_pointer(sdata->deflink.default_multicast_key, key); + rcu_assign_pointer(link->default_multicast_key, key); ieee80211_debugfs_key_update_default(sdata); } -void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, +void ieee80211_set_default_key(struct ieee80211_link_data *link, int idx, bool uni, bool multi) { - mutex_lock(&sdata->local->key_mtx); - __ieee80211_set_default_key(sdata, idx, uni, multi); - mutex_unlock(&sdata->local->key_mtx); + mutex_lock(&link->sdata->local->key_mtx); + __ieee80211_set_default_key(link, idx, uni, multi); + mutex_unlock(&link->sdata->local->key_mtx); } static void -__ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx) +__ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, int idx) { + struct ieee80211_sub_if_data *sdata = 
link->sdata; struct ieee80211_key *key = NULL; assert_key_lock(sdata->local); if (idx >= NUM_DEFAULT_KEYS && idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) - key = key_mtx_dereference(sdata->local, - sdata->deflink.gtk[idx]); + key = key_mtx_dereference(sdata->local, link->gtk[idx]); - rcu_assign_pointer(sdata->deflink.default_mgmt_key, key); + rcu_assign_pointer(link->default_mgmt_key, key); ieee80211_debugfs_key_update_default(sdata); } -void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, +void ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, int idx) { - mutex_lock(&sdata->local->key_mtx); - __ieee80211_set_default_mgmt_key(sdata, idx); - mutex_unlock(&sdata->local->key_mtx); + mutex_lock(&link->sdata->local->key_mtx); + __ieee80211_set_default_mgmt_key(link, idx); + mutex_unlock(&link->sdata->local->key_mtx); } static void -__ieee80211_set_default_beacon_key(struct ieee80211_sub_if_data *sdata, int idx) +__ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, int idx) { + struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_key *key = NULL; assert_key_lock(sdata->local); @@ -413,28 +415,30 @@ __ieee80211_set_default_beacon_key(struct ieee80211_sub_if_data *sdata, int idx) if (idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS && idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + NUM_DEFAULT_BEACON_KEYS) - key = key_mtx_dereference(sdata->local, - sdata->deflink.gtk[idx]); + key = key_mtx_dereference(sdata->local, link->gtk[idx]); - rcu_assign_pointer(sdata->deflink.default_beacon_key, key); + rcu_assign_pointer(link->default_beacon_key, key); ieee80211_debugfs_key_update_default(sdata); } -void ieee80211_set_default_beacon_key(struct ieee80211_sub_if_data *sdata, +void ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, int idx) { - mutex_lock(&sdata->local->key_mtx); - __ieee80211_set_default_beacon_key(sdata, idx); - mutex_unlock(&sdata->local->key_mtx); + mutex_lock(&link->sdata->local->key_mtx); + __ieee80211_set_default_beacon_key(link, idx); + mutex_unlock(&link->sdata->local->key_mtx); } static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, - struct sta_info *sta, - bool pairwise, - struct ieee80211_key *old, - struct ieee80211_key *new) + struct ieee80211_link_data *link, + struct sta_info *sta, + bool pairwise, + struct ieee80211_key *old, + struct ieee80211_key *new) { + struct link_sta_info *link_sta = sta ? 
&sta->deflink : NULL; + int link_id; int idx; int ret = 0; bool defunikey, defmultikey, defmgmtkey, defbeaconkey; @@ -446,13 +450,36 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, if (new) { idx = new->conf.keyidx; - list_add_tail_rcu(&new->list, &sdata->key_list); is_wep = new->conf.cipher == WLAN_CIPHER_SUITE_WEP40 || new->conf.cipher == WLAN_CIPHER_SUITE_WEP104; + link_id = new->conf.link_id; } else { idx = old->conf.keyidx; is_wep = old->conf.cipher == WLAN_CIPHER_SUITE_WEP40 || old->conf.cipher == WLAN_CIPHER_SUITE_WEP104; + link_id = old->conf.link_id; + } + + if (WARN(old && old->conf.link_id != link_id, + "old link ID %d doesn't match new link ID %d\n", + old->conf.link_id, link_id)) + return -EINVAL; + + if (link_id >= 0) { + if (!link) { + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + return -ENOLINK; + } + + if (sta) { + link_sta = rcu_dereference_protected(sta->link[link_id], + lockdep_is_held(&sta->local->sta_mtx)); + if (!link_sta) + return -ENOLINK; + } + } else { + link = &sdata->deflink; } if ((is_wep || pairwise) && idx >= NUM_DEFAULT_KEYS) @@ -482,6 +509,9 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, if (ret) return ret; + if (new) + list_add_tail_rcu(&new->list, &sdata->key_list); + if (sta) { if (pairwise) { rcu_assign_pointer(sta->ptk[idx], new); @@ -489,7 +519,7 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) _ieee80211_set_tx_key(new, true); } else { - rcu_assign_pointer(sta->deflink.gtk[idx], new); + rcu_assign_pointer(link_sta->gtk[idx], new); } /* Only needed for transition from no key -> key. * Still triggers unnecessary when using Extended Key ID @@ -503,39 +533,39 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, sdata->default_unicast_key); defmultikey = old && old == key_mtx_dereference(sdata->local, - sdata->deflink.default_multicast_key); + link->default_multicast_key); defmgmtkey = old && old == key_mtx_dereference(sdata->local, - sdata->deflink.default_mgmt_key); + link->default_mgmt_key); defbeaconkey = old && old == key_mtx_dereference(sdata->local, - sdata->deflink.default_beacon_key); + link->default_beacon_key); if (defunikey && !new) - __ieee80211_set_default_key(sdata, -1, true, false); + __ieee80211_set_default_key(link, -1, true, false); if (defmultikey && !new) - __ieee80211_set_default_key(sdata, -1, false, true); + __ieee80211_set_default_key(link, -1, false, true); if (defmgmtkey && !new) - __ieee80211_set_default_mgmt_key(sdata, -1); + __ieee80211_set_default_mgmt_key(link, -1); if (defbeaconkey && !new) - __ieee80211_set_default_beacon_key(sdata, -1); + __ieee80211_set_default_beacon_key(link, -1); if (is_wep || pairwise) rcu_assign_pointer(sdata->keys[idx], new); else - rcu_assign_pointer(sdata->deflink.gtk[idx], new); + rcu_assign_pointer(link->gtk[idx], new); if (defunikey && new) - __ieee80211_set_default_key(sdata, new->conf.keyidx, + __ieee80211_set_default_key(link, new->conf.keyidx, true, false); if (defmultikey && new) - __ieee80211_set_default_key(sdata, new->conf.keyidx, + __ieee80211_set_default_key(link, new->conf.keyidx, false, true); if (defmgmtkey && new) - __ieee80211_set_default_mgmt_key(sdata, + __ieee80211_set_default_mgmt_key(link, new->conf.keyidx); if (defbeaconkey && new) - __ieee80211_set_default_beacon_key(sdata, + __ieee80211_set_default_beacon_key(link, new->conf.keyidx); } @@ -569,6 +599,7 @@ ieee80211_key_alloc(u32 cipher, int idx, 
size_t key_len, key->conf.flags = 0; key->flags = 0; + key->conf.link_id = -1; key->conf.cipher = cipher; key->conf.keyidx = idx; key->conf.keylen = key_len; @@ -797,9 +828,10 @@ static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata, } int ieee80211_key_link(struct ieee80211_key *key, - struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, struct sta_info *sta) { + struct ieee80211_sub_if_data *sdata = link->sdata; static atomic_t key_color = ATOMIC_INIT(0); struct ieee80211_key *old_key = NULL; int idx = key->conf.keyidx; @@ -827,15 +859,26 @@ int ieee80211_key_link(struct ieee80211_key *key, (old_key && old_key->conf.cipher != key->conf.cipher)) goto out; } else if (sta) { - old_key = key_mtx_dereference(sdata->local, - sta->deflink.gtk[idx]); + struct link_sta_info *link_sta = &sta->deflink; + int link_id = key->conf.link_id; + + if (link_id >= 0) { + link_sta = rcu_dereference_protected(sta->link[link_id], + lockdep_is_held(&sta->local->sta_mtx)); + if (!link_sta) { + ret = -ENOLINK; + goto out; + } + } + + old_key = key_mtx_dereference(sdata->local, link_sta->gtk[idx]); } else { if (idx < NUM_DEFAULT_KEYS) old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); if (!old_key) old_key = key_mtx_dereference(sdata->local, - sdata->deflink.gtk[idx]); + link->gtk[idx]); } /* Non-pairwise keys must also not switch the cipher on rekey */ @@ -866,7 +909,7 @@ int ieee80211_key_link(struct ieee80211_key *key, increment_tailroom_need_count(sdata); - ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key); + ret = ieee80211_key_replace(sdata, link, sta, pairwise, old_key, key); if (!ret) { ieee80211_debugfs_key_add(key); @@ -890,9 +933,9 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) * Replace key with nothingness if it was ever used. 
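
The key.c hunks above thread a link ID through key management: ieee80211_key_alloc() now starts every key with conf.link_id = -1, and ieee80211_key_replace()/ieee80211_key_link() use that ID to pick either the station's per-link GTK table or fall back to the default link. A minimal user-space sketch of that selection rule follows; the names and types are invented for illustration, not mac80211's.

    /*
     * User-space analog of the link-ID selection added above: a negative
     * link ID means "use the default link", a non-negative one must name
     * a currently valid link or the caller fails the operation.
     */
    #include <stdio.h>

    #define MAX_LINKS 15

    struct link_slot { const char *gtk; };

    struct station {
        unsigned int valid_links;          /* bitmask of existing links */
        struct link_slot deflink;          /* used when no link ID is given */
        struct link_slot link[MAX_LINKS];
    };

    static struct link_slot *resolve_slot(struct station *sta, int link_id)
    {
        if (link_id < 0)
            return &sta->deflink;          /* non-MLO / default case */
        if (link_id >= MAX_LINKS || !(sta->valid_links & (1u << link_id)))
            return NULL;                   /* maps to -ENOLINK in the kernel */
        return &sta->link[link_id];
    }

    int main(void)
    {
        struct station sta = { .valid_links = 1u << 2 };

        printf("link -1 -> %p\n", (void *)resolve_slot(&sta, -1));
        printf("link  2 -> %p\n", (void *)resolve_slot(&sta, 2));
        printf("link  5 -> %p (invalid, expect nil)\n", (void *)resolve_slot(&sta, 5));
        return 0;
    }
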
*/ if (key->sdata) - ieee80211_key_replace(key->sdata, key->sta, - key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, - key, NULL); + ieee80211_key_replace(key->sdata, NULL, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); ieee80211_key_destroy(key, delay_tailroom); } @@ -1019,15 +1062,45 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata, ieee80211_debugfs_key_remove_beacon_default(sdata); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { - ieee80211_key_replace(key->sdata, key->sta, - key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, - key, NULL); + ieee80211_key_replace(key->sdata, NULL, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); list_add_tail(&key->list, keys); } ieee80211_debugfs_key_update_default(sdata); } +void ieee80211_remove_link_keys(struct ieee80211_link_data *link, + struct list_head *keys) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_key *key, *tmp; + + mutex_lock(&local->key_mtx); + list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { + if (key->conf.link_id != link->link_id) + continue; + ieee80211_key_replace(key->sdata, link, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); + list_add_tail(&key->list, keys); + } + mutex_unlock(&local->key_mtx); +} + +void ieee80211_free_key_list(struct ieee80211_local *local, + struct list_head *keys) +{ + struct ieee80211_key *key, *tmp; + + mutex_lock(&local->key_mtx); + list_for_each_entry_safe(key, tmp, keys, list) + __ieee80211_key_destroy(key, false); + mutex_unlock(&local->key_mtx); +} + void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, bool force_synchronize) { @@ -1087,9 +1160,9 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, key = key_mtx_dereference(local, sta->deflink.gtk[i]); if (!key) continue; - ieee80211_key_replace(key->sdata, key->sta, - key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, - key, NULL); + ieee80211_key_replace(key->sdata, NULL, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); __ieee80211_key_destroy(key, key->sdata->vif.type == NL80211_IFTYPE_STATION); } @@ -1098,9 +1171,9 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, key = key_mtx_dereference(local, sta->ptk[i]); if (!key) continue; - ieee80211_key_replace(key->sdata, key->sta, - key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, - key, NULL); + ieee80211_key_replace(key->sdata, NULL, key->sta, + key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, + key, NULL); __ieee80211_key_destroy(key, key->sdata->vif.type == NL80211_IFTYPE_STATION); } @@ -1307,7 +1380,8 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; - err = ieee80211_key_link(key, sdata, NULL); + /* FIXME: this function needs to get a link ID */ + err = ieee80211_key_link(key, &sdata->deflink, NULL); if (err) return ERR_PTR(err); diff --git a/net/mac80211/key.h b/net/mac80211/key.h index e994dcea1ce3..518af24aab56 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h @@ -22,6 +22,7 @@ struct ieee80211_local; struct ieee80211_sub_if_data; +struct ieee80211_link_data; struct sta_info; /** @@ -144,17 +145,21 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, * to make it used, free old key. On failure, also free the new key. 
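
ieee80211_remove_link_keys() above only unhooks a link's keys onto a caller-supplied list while holding key_mtx; ieee80211_free_key_list() destroys them in a second pass, so the caller can put a synchronize_rcu() between the two steps, as the new link teardown code does. A rough user-space analog of that two-phase pattern, with invented names and a plain mutex standing in for the kernel primitives:

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    struct key { int link_id; struct key *next; };

    static pthread_mutex_t key_mtx = PTHREAD_MUTEX_INITIALIZER;

    /* phase 1: move keys that belong to @link_id from *head onto *out */
    static void remove_link_keys(struct key **head, int link_id, struct key **out)
    {
        pthread_mutex_lock(&key_mtx);
        for (struct key **pp = head; *pp; ) {
            struct key *k = *pp;

            if (k->link_id != link_id) {
                pp = &k->next;
                continue;
            }
            *pp = k->next;                 /* unhook from the live list */
            k->next = *out;
            *out = k;                      /* park on the private list */
        }
        pthread_mutex_unlock(&key_mtx);
    }

    /* phase 2: free, once no reader can still hold a pointer */
    static void free_key_list(struct key *list)
    {
        while (list) {
            struct key *next = list->next;

            free(list);
            list = next;
        }
    }

    int main(void)
    {
        struct key *head = NULL, *doomed = NULL;

        for (int i = 0; i < 6; i++) {
            struct key *k = malloc(sizeof(*k));

            if (!k)
                return 1;
            k->link_id = i % 2;
            k->next = head;
            head = k;
        }
        remove_link_keys(&head, 1, &doomed);
        /* a grace period (synchronize_rcu() in the kernel) goes here */
        free_key_list(doomed);
        free_key_list(head);
        return 0;
    }

Deferring the free until after the grace period is what lets the unhook phase run while RCU readers may still be walking the per-link pointers.
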
*/ int ieee80211_key_link(struct ieee80211_key *key, - struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, struct sta_info *sta); int ieee80211_set_tx_key(struct ieee80211_key *key); void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom); void ieee80211_key_free_unused(struct ieee80211_key *key); -void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, +void ieee80211_set_default_key(struct ieee80211_link_data *link, int idx, bool uni, bool multi); -void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, +void ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, int idx); -void ieee80211_set_default_beacon_key(struct ieee80211_sub_if_data *sdata, +void ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, int idx); +void ieee80211_remove_link_keys(struct ieee80211_link_data *link, + struct list_head *keys); +void ieee80211_free_key_list(struct ieee80211_local *local, + struct list_head *keys); void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, bool force_synchronize); void ieee80211_free_sta_keys(struct ieee80211_local *local, diff --git a/net/mac80211/link.c b/net/mac80211/link.c new file mode 100644 index 000000000000..096f313c2a6e --- /dev/null +++ b/net/mac80211/link.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * MLO link handling + * + * Copyright (C) 2022 Intel Corporation + */ +#include <linux/slab.h> +#include <linux/kernel.h> +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "driver-ops.h" + +void ieee80211_link_setup(struct ieee80211_link_data *link) +{ + if (link->sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_mgd_setup_link(link); +} + +void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, + int link_id, + struct ieee80211_link_data *link, + struct ieee80211_bss_conf *link_conf) +{ + bool deflink = link_id < 0; + + if (link_id < 0) + link_id = 0; + + rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf); + rcu_assign_pointer(sdata->link[link_id], link); + + link->sdata = sdata; + link->link_id = link_id; + link->conf = link_conf; + link_conf->link_id = link_id; + + INIT_WORK(&link->csa_finalize_work, + ieee80211_csa_finalize_work); + INIT_WORK(&link->color_change_finalize_work, + ieee80211_color_change_finalize_work); + INIT_LIST_HEAD(&link->assigned_chanctx_list); + INIT_LIST_HEAD(&link->reserved_chanctx_list); + INIT_DELAYED_WORK(&link->dfs_cac_timer_work, + ieee80211_dfs_cac_timer_work); + + if (!deflink) { + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + ether_addr_copy(link_conf->addr, + sdata->wdev.links[link_id].addr); + link_conf->bssid = link_conf->addr; + WARN_ON(!(sdata->wdev.valid_links & BIT(link_id))); + break; + case NL80211_IFTYPE_STATION: + /* station sets the bssid in ieee80211_mgd_setup_link */ + break; + default: + WARN_ON(1); + } + } +} + +void ieee80211_link_stop(struct ieee80211_link_data *link) +{ + if (link->sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_mgd_stop_link(link); + + ieee80211_link_release_channel(link); +} + +struct link_container { + struct ieee80211_link_data data; + struct ieee80211_bss_conf conf; +}; + +static void ieee80211_free_links(struct ieee80211_sub_if_data *sdata, + struct link_container **links) +{ + LIST_HEAD(keys); + unsigned int link_id; + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + if (!links[link_id]) + continue; + ieee80211_remove_link_keys(&links[link_id]->data, &keys); + } + + synchronize_rcu(); + + 
ieee80211_free_key_list(sdata->local, &keys); + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + if (!links[link_id]) + continue; + ieee80211_link_stop(&links[link_id]->data); + kfree(links[link_id]); + } +} + +static int ieee80211_check_dup_link_addrs(struct ieee80211_sub_if_data *sdata) +{ + unsigned int i, j; + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + struct ieee80211_link_data *link1; + + link1 = sdata_dereference(sdata->link[i], sdata); + if (!link1) + continue; + for (j = i + 1; j < IEEE80211_MLD_MAX_NUM_LINKS; j++) { + struct ieee80211_link_data *link2; + + link2 = sdata_dereference(sdata->link[j], sdata); + if (!link2) + continue; + + if (ether_addr_equal(link1->conf->addr, + link2->conf->addr)) + return -EALREADY; + } + } + + return 0; +} + +static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, + struct link_container **to_free, + u16 new_links) +{ + u16 old_links = sdata->vif.valid_links; + unsigned long add = new_links & ~old_links; + unsigned long rem = old_links & ~new_links; + unsigned int link_id; + int ret; + struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *link; + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]; + struct ieee80211_link_data *old_data[IEEE80211_MLD_MAX_NUM_LINKS]; + bool use_deflink = old_links == 0; /* set for error case */ + + sdata_assert_lock(sdata); + + memset(to_free, 0, sizeof(links)); + + if (old_links == new_links) + return 0; + + /* if there were no old links, need to clear the pointers to deflink */ + if (!old_links) + rem |= BIT(0); + + /* allocate new link structures first */ + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + ret = -ENOMEM; + goto free; + } + links[link_id] = link; + } + + /* keep track of the old pointers for the driver */ + BUILD_BUG_ON(sizeof(old) != sizeof(sdata->vif.link_conf)); + memcpy(old, sdata->vif.link_conf, sizeof(old)); + /* and for us in error cases */ + BUILD_BUG_ON(sizeof(old_data) != sizeof(sdata->link)); + memcpy(old_data, sdata->link, sizeof(old_data)); + + /* grab old links to free later */ + for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { + if (rcu_access_pointer(sdata->link[link_id]) != &sdata->deflink) { + /* + * we must have allocated the data through this path so + * we know we can free both at the same time + */ + to_free[link_id] = container_of(rcu_access_pointer(sdata->link[link_id]), + typeof(*links[link_id]), + data); + } + + RCU_INIT_POINTER(sdata->link[link_id], NULL); + RCU_INIT_POINTER(sdata->vif.link_conf[link_id], NULL); + } + + /* link them into data structures */ + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + WARN_ON(!use_deflink && + rcu_access_pointer(sdata->link[link_id]) == &sdata->deflink); + + link = links[link_id]; + ieee80211_link_init(sdata, link_id, &link->data, &link->conf); + ieee80211_link_setup(&link->data); + } + + if (new_links == 0) + ieee80211_link_init(sdata, -1, &sdata->deflink, + &sdata->vif.bss_conf); + + sdata->vif.valid_links = new_links; + + ret = ieee80211_check_dup_link_addrs(sdata); + if (!ret) { + /* tell the driver */ + ret = drv_change_vif_links(sdata->local, sdata, + old_links, new_links, + old); + } + + if (ret) { + /* restore config */ + memcpy(sdata->link, old_data, sizeof(old_data)); + memcpy(sdata->vif.link_conf, old, sizeof(old)); + sdata->vif.valid_links = old_links; + /* and free (only) the newly allocated links */ + memset(to_free, 0, sizeof(links)); + 
goto free; + } + + /* use deflink/bss_conf again if and only if there are no more links */ + use_deflink = new_links == 0; + + goto deinit; +free: + /* if we failed during allocation, only free all */ + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + kfree(links[link_id]); + links[link_id] = NULL; + } +deinit: + if (use_deflink) + ieee80211_link_init(sdata, -1, &sdata->deflink, + &sdata->vif.bss_conf); + return ret; +} + +int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, + u16 new_links) +{ + struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS]; + int ret; + + ret = ieee80211_vif_update_links(sdata, links, new_links); + ieee80211_free_links(sdata, links); + + return ret; +} + +void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata) +{ + struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS]; + + /* + * The locking here is different because when we free links + * in the station case we need to be able to cancel_work_sync() + * something that also takes the lock. + */ + + sdata_lock(sdata); + ieee80211_vif_update_links(sdata, links, 0); + sdata_unlock(sdata); + + ieee80211_free_links(sdata, links); +} diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 5b1c47ed0cc0..46f3eddc2388 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -699,6 +699,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_FREQ_KHZ); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE); if (!ops->hw_scan) { wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 6991c4c479da..5a99b8f6e465 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -634,7 +634,7 @@ int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata, if (!iftd) return 0; - ieee80211_ie_build_he_6ghz_cap(sdata, skb); + ieee80211_ie_build_he_6ghz_cap(sdata, sdata->deflink.smps_mode, skb); return 0; } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3d4ab711f0d1..699e409ef45a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -314,7 +314,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, if (eht_oper && (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) { struct cfg80211_chan_def eht_chandef = *chandef; - ieee80211_chandef_eht_oper(sdata, eht_oper, + ieee80211_chandef_eht_oper(eht_oper, eht_chandef.width == NL80211_CHAN_WIDTH_160, false, &eht_chandef); @@ -695,6 +695,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_supported_band *sband, + enum ieee80211_smps_mode smps_mode, ieee80211_conn_flags_t conn_flags) { u8 *pos, *pre_he_pos; @@ -719,7 +720,7 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, /* trim excess if any */ skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos)); - ieee80211_ie_build_he_6ghz_cap(sdata, skb); + ieee80211_ie_build_he_6ghz_cap(sdata, smps_mode, skb); } static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata, @@ -746,11 +747,13 @@ static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata, eht_cap_size = 2 + 1 + sizeof(eht_cap->eht_cap_elem) + ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, - &eht_cap->eht_cap_elem) + + &eht_cap->eht_cap_elem, + false) + ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], 
eht_cap->eht_cap_elem.phy_cap_info); pos = skb_put(skb, eht_cap_size); - ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size); + ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size, + false); } static void ieee80211_assoc_add_rates(struct sk_buff *skb, @@ -1098,7 +1101,7 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata, offset); if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HE)) { - ieee80211_add_he_ie(sdata, skb, sband, + ieee80211_add_he_ie(sdata, skb, sband, smps_mode, assoc_data->link[link_id].conn_flags); ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY); } @@ -1220,14 +1223,21 @@ static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata, ml_elem = skb_put(skb, sizeof(*ml_elem)); ml_elem->control = cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_BASIC | - IEEE80211_MLC_BASIC_PRES_EML_CAPA | IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP); common = skb_put(skb, sizeof(*common)); common->len = sizeof(*common) + - 2 + /* EML capabilities */ 2; /* MLD capa/ops */ memcpy(common->mld_mac_addr, sdata->vif.addr, ETH_ALEN); - skb_put_data(skb, &eml_capa, sizeof(eml_capa)); + + /* add EML_CAPA only if needed, see Draft P802.11be_D2.1, 35.3.17 */ + if (eml_capa & + cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP | + IEEE80211_EML_CAP_EMLMR_SUPPORT))) { + common->len += 2; /* EML capabilities */ + ml_elem->control |= + cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EML_CAPA); + skb_put_data(skb, &eml_capa, sizeof(eml_capa)); + } /* need indication from userspace to support this */ mld_capa_ops &= ~cpu_to_le16(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP); skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops)); @@ -1902,7 +1912,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, IEEE80211_QUEUE_STOP_REASON_CSA); mutex_unlock(&local->mtx); - cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, + cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, 0, csa_ie.count, csa_ie.mode); if (local->ops->channel_switch) { @@ -2435,6 +2445,29 @@ static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work) ieee80211_sta_handle_tspec_ac_params(sdata); } +void ieee80211_mgd_set_link_qos_params(struct ieee80211_link_data *link) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_tx_queue_params *params = link->tx_conf; + u8 ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + mlme_dbg(sdata, + "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n", + ac, params[ac].acm, + params[ac].aifs, params[ac].cw_min, params[ac].cw_max, + params[ac].txop, params[ac].uapsd, + ifmgd->tx_tspec[ac].downgraded); + if (!ifmgd->tx_tspec[ac].downgraded && + drv_conf_tx(local, link, ac, ¶ms[ac])) + link_err(link, + "failed to set TX queue parameters for AC %d\n", + ac); + } +} + /* MLME */ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, @@ -2566,20 +2599,10 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local, } } - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - mlme_dbg(sdata, - "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n", - ac, params[ac].acm, - params[ac].aifs, params[ac].cw_min, params[ac].cw_max, - params[ac].txop, params[ac].uapsd, - ifmgd->tx_tspec[ac].downgraded); + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) link->tx_conf[ac] = params[ac]; - if (!ifmgd->tx_tspec[ac].downgraded && - 
drv_conf_tx(local, link, ac, ¶ms[ac])) - link_err(link, - "failed to set TX queue parameters for AC %d\n", - ac); - } + + ieee80211_mgd_set_link_qos_params(link); /* enable WMM or activate new settings */ link->conf->qos = true; @@ -3420,11 +3443,11 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata, ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_BSSID); sdata->u.mgd.flags = 0; + mutex_lock(&sdata->local->mtx); ieee80211_link_release_channel(&sdata->deflink); - mutex_unlock(&sdata->local->mtx); - ieee80211_vif_set_links(sdata, 0); + mutex_unlock(&sdata->local->mtx); } cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss); @@ -3462,10 +3485,6 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, sdata->u.mgd.flags = 0; sdata->vif.bss_conf.mu_mimo_owner = false; - mutex_lock(&sdata->local->mtx); - ieee80211_link_release_channel(&sdata->deflink); - mutex_unlock(&sdata->local->mtx); - if (status != ASSOC_REJECTED) { struct cfg80211_assoc_failure data = { .timeout = status == ASSOC_TIMEOUT, @@ -3484,7 +3503,10 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, cfg80211_assoc_failure(sdata->dev, &data); } + mutex_lock(&sdata->local->mtx); + ieee80211_link_release_channel(&sdata->deflink); ieee80211_vif_set_links(sdata, 0); + mutex_unlock(&sdata->local->mtx); } kfree(assoc_data); @@ -3905,6 +3927,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, .len = elem_len, .bss = cbss, .link_id = link == &sdata->deflink ? -1 : link->link_id, + .from_ap = true, }; bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ; @@ -4573,6 +4596,11 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ; struct ieee80211_bss *bss = (void *)cbss->priv; + struct ieee80211_elems_parse_params parse_params = { + .bss = cbss, + .link_id = -1, + .from_ap = true, + }; struct ieee802_11_elems *elems; const struct cfg80211_bss_ies *ies; int ret; @@ -4582,7 +4610,9 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, rcu_read_lock(); ies = rcu_dereference(cbss->ies); - elems = ieee802_11_parse_elems(ies->data, ies->len, false, cbss); + parse_params.start = ies->data; + parse_params.len = ies->len; + elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) { rcu_read_unlock(); return -ENOMEM; @@ -4937,6 +4967,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u16 capab_info, status_code, aid; + struct ieee80211_elems_parse_params parse_params = { + .bss = NULL, + .link_id = -1, + .from_ap = true, + }; struct ieee802_11_elems *elems; int ac; const u8 *elem_start; @@ -4991,7 +5026,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, return; elem_len = len - (elem_start - (u8 *)mgmt); - elems = ieee802_11_parse_elems(elem_start, elem_len, false, NULL); + parse_params.start = elem_start; + parse_params.len = elem_len; + elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) goto notify_driver; @@ -5124,7 +5161,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, resp.req_ies = ifmgd->assoc_req_ies; resp.req_ies_len = ifmgd->assoc_req_ies_len; if 
(sdata->vif.valid_links) - resp.ap_mld_addr = assoc_data->ap_addr; + resp.ap_mld_addr = sdata->vif.cfg.ap_addr; cfg80211_rx_assoc_resp(sdata->dev, &resp); notify_driver: drv_mgd_complete_tx(sdata->local, sdata, &info); @@ -5356,6 +5393,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, u32 ncrc = 0; u8 *bssid, *variable = mgmt->u.beacon.variable; u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; + struct ieee80211_elems_parse_params parse_params = { + .link_id = -1, + .from_ap = true, + }; sdata_assert_lock(sdata); @@ -5374,6 +5415,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, if (baselen > len) return; + parse_params.start = variable; + parse_params.len = len - baselen; + rcu_read_lock(); chanctx_conf = rcu_dereference(link->conf->chanctx_conf); if (!chanctx_conf) { @@ -5392,8 +5436,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon && !WARN_ON(sdata->vif.valid_links) && ieee80211_rx_our_beacon(bssid, ifmgd->assoc_data->link[0].bss)) { - elems = ieee802_11_parse_elems(variable, len - baselen, false, - ifmgd->assoc_data->link[0].bss); + parse_params.bss = ifmgd->assoc_data->link[0].bss; + elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) return; @@ -5459,9 +5503,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, */ if (!ieee80211_is_s1g_beacon(hdr->frame_control)) ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); - elems = ieee802_11_parse_elems_crc(variable, len - baselen, - false, care_about_ies, ncrc, - link->u.mgd.bss); + parse_params.bss = link->u.mgd.bss; + parse_params.filter = care_about_ies; + parse_params.crc = ncrc; + elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) return; ncrc = elems->crc; @@ -5671,6 +5716,13 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, sdata_lock(sdata); + if (rx_status->link_valid) { + link = sdata_dereference(sdata->link[rx_status->link_id], + sdata); + if (!link) + goto out; + } + switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_BEACON: ieee80211_rx_mgmt_beacon(link, (void *)mgmt, @@ -5747,6 +5799,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, } break; } +out: sdata_unlock(sdata); } @@ -6282,6 +6335,8 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link) if (sdata->u.mgd.assoc_data) ether_addr_copy(link->conf->addr, sdata->u.mgd.assoc_data->link[link_id].addr); + else if (!is_valid_ether_addr(link->conf->addr)) + eth_random_addr(link->conf->addr); } /* scan finished notification */ @@ -6369,9 +6424,6 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, goto out_err; } - if (mlo && !is_valid_ether_addr(link->conf->addr)) - eth_random_addr(link->conf->addr); - if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) { err = -EINVAL; goto out_err; @@ -6509,6 +6561,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, return 0; out_err: + ieee80211_link_release_channel(&sdata->deflink); ieee80211_vif_set_links(sdata, 0); return err; } @@ -6853,6 +6906,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, } } + /* FIXME: no support for 4-addr MLO yet */ + if (sdata->u.mgd.use_4addr && req->link_id >= 0) + return -EOPNOTSUPP; + assoc_data = kzalloc(size, GFP_KERNEL); if (!assoc_data) return -ENOMEM; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 57df21e2170a..511c809e2c6b 100644 --- a/net/mac80211/rx.c +++ 
b/net/mac80211/rx.c @@ -215,9 +215,19 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, } static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, + int link_id, struct sta_info *sta, struct sk_buff *skb) { + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + if (link_id >= 0) { + status->link_valid = 1; + status->link_id = link_id; + } else { + status->link_valid = 0; + } + skb_queue_tail(&sdata->skb_queue, skb); ieee80211_queue_work(&sdata->local->hw, &sdata->work); if (sta) @@ -225,11 +235,12 @@ static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, } static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, + int link_id, struct sta_info *sta, struct sk_buff *skb) { skb->protocol = 0; - __ieee80211_queue_skb_to_iface(sdata, sta, skb); + __ieee80211_queue_skb_to_iface(sdata, link_id, sta, skb); } static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, @@ -272,7 +283,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, if (!skb) return; - ieee80211_queue_skb_to_iface(sdata, NULL, skb); + ieee80211_queue_skb_to_iface(sdata, -1, NULL, skb); } /* @@ -1394,7 +1405,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, /* if this mpdu is fragmented - terminate rx aggregation session */ sc = le16_to_cpu(hdr->seq_ctrl); if (sc & IEEE80211_SCTL_FRAG) { - ieee80211_queue_skb_to_iface(rx->sdata, NULL, skb); + ieee80211_queue_skb_to_iface(rx->sdata, rx->link_id, NULL, skb); return; } @@ -1854,7 +1865,6 @@ static struct ieee80211_key * ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) { struct ieee80211_key *key = NULL; - struct ieee80211_sub_if_data *sdata = rx->sdata; int idx2; /* Make sure key gets set if either BIGTK key index is set so that @@ -1873,14 +1883,14 @@ ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) idx2 = idx - 1; } - if (rx->sta) - key = rcu_dereference(rx->sta->deflink.gtk[idx]); + if (rx->link_sta) + key = rcu_dereference(rx->link_sta->gtk[idx]); if (!key) - key = rcu_dereference(sdata->deflink.gtk[idx]); - if (!key && rx->sta) - key = rcu_dereference(rx->sta->deflink.gtk[idx2]); + key = rcu_dereference(rx->link->gtk[idx]); + if (!key && rx->link_sta) + key = rcu_dereference(rx->link_sta->gtk[idx2]); if (!key) - key = rcu_dereference(sdata->deflink.gtk[idx2]); + key = rcu_dereference(rx->link->gtk[idx2]); return key; } @@ -1986,15 +1996,15 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) if (mmie_keyidx < NUM_DEFAULT_KEYS || mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) return RX_DROP_MONITOR; /* unexpected BIP keyidx */ - if (rx->sta) { + if (rx->link_sta) { if (ieee80211_is_group_privacy_action(skb) && test_sta_flag(rx->sta, WLAN_STA_MFP)) return RX_DROP_MONITOR; - rx->key = rcu_dereference(rx->sta->deflink.gtk[mmie_keyidx]); + rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]); } if (!rx->key) - rx->key = rcu_dereference(rx->sdata->deflink.gtk[mmie_keyidx]); + rx->key = rcu_dereference(rx->link->gtk[mmie_keyidx]); } else if (!ieee80211_has_protected(fc)) { /* * The frame was not protected, so skip decryption. However, we @@ -2003,25 +2013,24 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) * have been expected. 
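
The decrypt-path changes above move the group-key lookups from the interface-wide deflink tables to the per-frame link data (rx->link_sta and rx->link); the fallback order itself stays the same, station GTK first, then the link's GTK. A compact stand-alone model of that cascade, with the arrays and names invented for the example:

    #include <stdio.h>

    #define NUM_DEFAULT_KEYS 4

    /* try the station's per-link table first, then the link's own table */
    static const char *select_group_key(const char **sta_gtk,
                                        const char **link_gtk,
                                        int keyidx)
    {
        const char *key = NULL;

        if (sta_gtk)
            key = sta_gtk[keyidx];
        if (!key)
            key = link_gtk[keyidx];
        return key;
    }

    int main(void)
    {
        const char *sta_gtk[NUM_DEFAULT_KEYS] = { NULL, "sta-gtk-1" };
        const char *link_gtk[NUM_DEFAULT_KEYS] = { "link-gtk-0", "link-gtk-1" };

        printf("idx 0: %s\n", select_group_key(sta_gtk, link_gtk, 0));
        printf("idx 1: %s\n", select_group_key(sta_gtk, link_gtk, 1));
        printf("no sta: %s\n", select_group_key(NULL, link_gtk, 1));
        return 0;
    }
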
*/ struct ieee80211_key *key = NULL; - struct ieee80211_sub_if_data *sdata = rx->sdata; int i; if (ieee80211_is_beacon(fc)) { key = ieee80211_rx_get_bigtk(rx, -1); } else if (ieee80211_is_mgmt(fc) && is_multicast_ether_addr(hdr->addr1)) { - key = rcu_dereference(rx->sdata->deflink.default_mgmt_key); + key = rcu_dereference(rx->link->default_mgmt_key); } else { - if (rx->sta) { + if (rx->link_sta) { for (i = 0; i < NUM_DEFAULT_KEYS; i++) { - key = rcu_dereference(rx->sta->deflink.gtk[i]); + key = rcu_dereference(rx->link_sta->gtk[i]); if (key) break; } } if (!key) { for (i = 0; i < NUM_DEFAULT_KEYS; i++) { - key = rcu_dereference(sdata->deflink.gtk[i]); + key = rcu_dereference(rx->link->gtk[i]); if (key) break; } @@ -2050,13 +2059,13 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; /* check per-station GTK first, if multicast packet */ - if (is_multicast_ether_addr(hdr->addr1) && rx->sta) - rx->key = rcu_dereference(rx->sta->deflink.gtk[keyidx]); + if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta) + rx->key = rcu_dereference(rx->link_sta->gtk[keyidx]); /* if not found, try default key */ if (!rx->key) { if (is_multicast_ether_addr(hdr->addr1)) - rx->key = rcu_dereference(rx->sdata->deflink.gtk[keyidx]); + rx->key = rcu_dereference(rx->link->gtk[keyidx]); if (!rx->key) rx->key = rcu_dereference(rx->sdata->keys[keyidx]); @@ -3046,7 +3055,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { rx->skb->protocol = cpu_to_be16(ETH_P_TDLS); - __ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); + __ieee80211_queue_skb_to_iface(sdata, rx->link_id, + rx->sta, rx->skb); return RX_QUEUED; } } @@ -3636,7 +3646,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) return RX_QUEUED; queue: - ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); + ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); return RX_QUEUED; } @@ -3794,7 +3804,7 @@ ieee80211_rx_h_ext(struct ieee80211_rx_data *rx) return RX_DROP_MONITOR; /* for now only beacons are ext, so queue them */ - ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); + ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); return RX_QUEUED; } @@ -3851,7 +3861,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) return RX_DROP_MONITOR; } - ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); + ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); return RX_QUEUED; } @@ -4074,6 +4084,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) .link_id = -1, }; struct tid_ampdu_rx *tid_agg_rx; + u8 link_id; tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); if (!tid_agg_rx) @@ -4093,6 +4104,9 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) }; drv_event_callback(rx.local, rx.sdata, &event); } + /* FIXME: statistics won't be right with this */ + link_id = sta->sta.valid_links ? 
ffs(sta->sta.valid_links) - 1 : 0; + rx.link = rcu_dereference(sta->sdata->link[link_id]); ieee80211_rx_handlers(&rx, &frames); } @@ -4508,6 +4522,15 @@ void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) mutex_unlock(&local->sta_mtx); } +static bool +ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) +{ + if (!sta->mlo) + return false; + + return !!(sta->valid_links & BIT(link_id)); +} + static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, struct ieee80211_fast_rx *fast_rx, int orig_len) @@ -4515,19 +4538,30 @@ static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, struct ieee80211_sta_rx_stats *stats; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); struct sta_info *sta = rx->sta; + struct link_sta_info *link_sta; struct sk_buff *skb = rx->skb; void *sa = skb->data + ETH_ALEN; void *da = skb->data; - stats = &sta->deflink.rx_stats; + if (rx->link_id >= 0) { + link_sta = rcu_dereference(sta->link[rx->link_id]); + if (WARN_ON_ONCE(!link_sta)) { + dev_kfree_skb(rx->skb); + return; + } + } else { + link_sta = &sta->deflink; + } + + stats = &link_sta->rx_stats; if (fast_rx->uses_rss) - stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats); + stats = this_cpu_ptr(link_sta->pcpu_rx_stats); /* statistics part of ieee80211_rx_h_sta_process() */ if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { stats->last_signal = status->signal; if (!fast_rx->uses_rss) - ewma_signal_add(&sta->deflink.rx_stats_avg.signal, + ewma_signal_add(&link_sta->rx_stats_avg.signal, -status->signal); } @@ -4543,7 +4577,7 @@ static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, stats->chain_signal_last[i] = signal; if (!fast_rx->uses_rss) - ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i], + ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i], -signal); } } @@ -4619,7 +4653,8 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; } addrs __aligned(2); - struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats; + struct link_sta_info *link_sta; + struct ieee80211_sta_rx_stats *stats; /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write * to a common data structure; drivers can implement that per queue @@ -4720,8 +4755,19 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, return true; drop: dev_kfree_skb(skb); + + if (rx->link_id >= 0) { + link_sta = rcu_dereference(sta->link[rx->link_id]); + if (!link_sta) + return true; + } else { + link_sta = &sta->deflink; + } + if (fast_rx->uses_rss) - stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats); + stats = this_cpu_ptr(link_sta->pcpu_rx_stats); + else + stats = &link_sta->rx_stats; stats->dropped++; return true; @@ -4769,7 +4815,17 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, if (!link) return true; rx->link = link; + + if (rx->sta) { + rx->link_sta = + rcu_dereference(rx->sta->link[rx->link_id]); + if (!rx->link_sta) + return true; + } } else { + if (rx->sta) + rx->link_sta = &rx->sta->deflink; + rx->link = &sdata->deflink; } @@ -4827,6 +4883,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw, struct list_head *list) { struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_fast_rx *fast_rx; struct ieee80211_rx_data rx; @@ -4847,7 +4904,31 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw, rx.sta = container_of(pubsta, struct sta_info, sta); rx.sdata = rx.sta->sdata; - rx.link 
= &rx.sdata->deflink; + + if (status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id)) + goto drop; + + /* + * TODO: Should the frame be dropped if the right link_id is not + * available? Or may be it is fine in the current form to proceed with + * the frame processing because with frame being in 802.3 format, + * link_id is used only for stats purpose and updating the stats on + * the deflink is fine? + */ + if (status->link_valid) + rx.link_id = status->link_id; + + if (rx.link_id >= 0) { + struct ieee80211_link_data *link; + + link = rcu_dereference(rx.sdata->link[rx.link_id]); + if (!link) + goto drop; + rx.link = link; + } else { + rx.link = &rx.sdata->deflink; + } fast_rx = rcu_dereference(rx.sta->fast_rx); if (!fast_rx) @@ -4877,7 +4958,19 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx, rx->sta = link_sta->sta; rx->link_id = link_sta->link_id; } else { + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2); + if (rx->sta) { + if (status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, + status->link_id)) + return false; + + rx->link_id = status->link_valid ? status->link_id : -1; + } else { + rx->link_id = -1; + } } return ieee80211_prepare_and_rx_handle(rx, skb, consume); @@ -4893,6 +4986,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, struct list_head *list) { struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_sub_if_data *sdata; struct ieee80211_hdr *hdr; __le16 fc; @@ -4937,10 +5031,39 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, if (ieee80211_is_data(fc)) { struct sta_info *sta, *prev_sta; + u8 link_id = status->link_id; if (pubsta) { rx.sta = container_of(pubsta, struct sta_info, sta); rx.sdata = rx.sta->sdata; + + if (status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id)) + goto out; + + if (status->link_valid) + rx.link_id = status->link_id; + + /* + * In MLO connection, fetch the link_id using addr2 + * when the driver does not pass link_id in status. + * When the address translation is already performed by + * driver/hw, the valid link_id must be passed in + * status. + */ + + if (!status->link_valid && pubsta->mlo) { + struct ieee80211_hdr *hdr = (void *)skb->data; + struct link_sta_info *link_sta; + + link_sta = link_sta_info_get_bss(rx.sdata, + hdr->addr2); + if (!link_sta) + goto out; + + rx.link_id = link_sta->link_id; + } + if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) return; goto out; @@ -4954,6 +5077,13 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, continue; } + if ((status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta, + link_id)) || + (!status->link_valid && prev_sta->sta.mlo)) + continue; + + rx.link_id = status->link_valid ? link_id : -1; rx.sta = prev_sta; rx.sdata = prev_sta->sdata; ieee80211_prepare_and_rx_handle(&rx, skb, false); @@ -4962,6 +5092,13 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, } if (prev_sta) { + if ((status->link_valid && + !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta, + link_id)) || + (!status->link_valid && prev_sta->sta.mlo)) + goto out; + + rx.link_id = status->link_valid ? 
link_id : -1; rx.sta = prev_sta; rx.sdata = prev_sta->sdata; @@ -5104,6 +5241,9 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, } } + if (WARN_ON_ONCE(status->link_id >= IEEE80211_LINK_UNSPECIFIED)) + goto drop; + status->rx_flags = 0; kcov_remote_start_common(skb_get_kcov_handle(skb)); diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index fa8ddf576bc1..0e8c4f48c36d 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -469,20 +469,23 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->mtx)); - if (scan_req != local->int_scan_req) { - local->scan_info.aborted = aborted; - cfg80211_scan_done(scan_req, &local->scan_info); - } RCU_INIT_POINTER(local->scan_req, NULL); RCU_INIT_POINTER(local->scan_sdata, NULL); local->scanning = 0; local->scan_chandef.chan = NULL; + synchronize_rcu(); + + if (scan_req != local->int_scan_req) { + local->scan_info.aborted = aborted; + cfg80211_scan_done(scan_req, &local->scan_info); + } + /* Set power back to normal operating levels. */ ieee80211_hw_config(local, 0); - if (!hw_scan) { + if (!hw_scan && was_scanning) { ieee80211_configure_filter(local); drv_sw_scan_complete(local, scan_sdata); ieee80211_offchannel_return(local); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index cb23da9aff1e..fe8702d92892 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -472,6 +472,7 @@ static void sta_info_add_link(struct sta_info *sta, link_info->sta = sta; link_info->link_id = link_id; link_info->pub = link_sta; + link_sta->link_id = link_id; rcu_assign_pointer(sta->link[link_id], link_info); rcu_assign_pointer(sta->sta.link[link_id], link_sta); } @@ -494,7 +495,7 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->sdata = sdata; if (sta_info_alloc_link(local, &sta->deflink, gfp)) - return NULL; + goto free; if (link_id >= 0) { sta_info_add_link(sta, link_id, &sta->deflink, @@ -2316,9 +2317,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats, u64 value; do { - start = u64_stats_fetch_begin(&rxstats->syncp); + start = u64_stats_fetch_begin_irq(&rxstats->syncp); value = rxstats->msdu[tid]; - } while (u64_stats_fetch_retry(&rxstats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); return value; } @@ -2384,9 +2385,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) u64 value; do { - start = u64_stats_fetch_begin(&rxstats->syncp); + start = u64_stats_fetch_begin_irq(&rxstats->syncp); value = rxstats->bytes; - } while (u64_stats_fetch_retry(&rxstats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); return value; } @@ -2777,10 +2778,8 @@ int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id) sta->sta.valid_links = new_links; - if (!test_sta_flag(sta, WLAN_STA_INSERTED)) { - ret = 0; + if (!test_sta_flag(sta, WLAN_STA_INSERTED)) goto hash; - } ret = drv_change_sta_links(sdata->local, sdata, &sta->sta, old_links, new_links); @@ -2799,6 +2798,7 @@ hash: void ieee80211_sta_remove_link(struct sta_info *sta, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = sta->sdata; + u16 old_links = sta->sta.valid_links; lockdep_assert_held(&sdata->local->sta_mtx); @@ -2806,8 +2806,7 @@ void ieee80211_sta_remove_link(struct sta_info *sta, unsigned int link_id) if (test_sta_flag(sta, WLAN_STA_INSERTED)) drv_change_sta_links(sdata->local, sdata, 
&sta->sta, - sta->sta.valid_links, - sta->sta.valid_links & ~BIT(link_id)); + old_links, sta->sta.valid_links); sta_remove_link(sta, link_id, true); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 45df9932d0ba..1be8c9d83d6a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -576,6 +576,51 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) return TX_CONTINUE; } +static struct ieee80211_key * +ieee80211_select_link_key(struct ieee80211_tx_data *tx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + enum { + USE_NONE, + USE_MGMT_KEY, + USE_MCAST_KEY, + } which_key = USE_NONE; + struct ieee80211_link_data *link; + unsigned int link_id; + + if (ieee80211_is_group_privacy_action(tx->skb)) + which_key = USE_MCAST_KEY; + else if (ieee80211_is_mgmt(hdr->frame_control) && + is_multicast_ether_addr(hdr->addr1) && + ieee80211_is_robust_mgmt_frame(tx->skb)) + which_key = USE_MGMT_KEY; + else if (is_multicast_ether_addr(hdr->addr1)) + which_key = USE_MCAST_KEY; + else + return NULL; + + link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK); + if (link_id == IEEE80211_LINK_UNSPECIFIED) { + link = &tx->sdata->deflink; + } else { + link = rcu_dereference(tx->sdata->link[link_id]); + if (!link) + return NULL; + } + + switch (which_key) { + case USE_NONE: + break; + case USE_MGMT_KEY: + return rcu_dereference(link->default_mgmt_key); + case USE_MCAST_KEY: + return rcu_dereference(link->default_multicast_key); + } + + return NULL; +} + static ieee80211_tx_result debug_noinline ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) { @@ -591,16 +636,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) if (tx->sta && (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) tx->key = key; - else if (ieee80211_is_group_privacy_action(tx->skb) && - (key = rcu_dereference(tx->sdata->deflink.default_multicast_key))) - tx->key = key; - else if (ieee80211_is_mgmt(hdr->frame_control) && - is_multicast_ether_addr(hdr->addr1) && - ieee80211_is_robust_mgmt_frame(tx->skb) && - (key = rcu_dereference(tx->sdata->deflink.default_mgmt_key))) - tx->key = key; - else if (is_multicast_ether_addr(hdr->addr1) && - (key = rcu_dereference(tx->sdata->deflink.default_multicast_key))) + else if ((key = ieee80211_select_link_key(tx))) tx->key = key; else if (!is_multicast_ether_addr(hdr->addr1) && (key = rcu_dereference(tx->sdata->default_unicast_key))) @@ -2640,7 +2676,8 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, goto free; } memcpy(hdr.addr2, link->conf->addr, ETH_ALEN); - } else if (link_id == IEEE80211_LINK_UNSPECIFIED) { + } else if (link_id == IEEE80211_LINK_UNSPECIFIED || + (sta && sta->sta.mlo)) { memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); } else { struct ieee80211_bss_conf *conf; @@ -3735,8 +3772,8 @@ begin: !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) && (!(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) || - !ether_addr_equal(tx.sdata->vif.addr, - hdr->addr2)))) { + !ieee80211_is_our_addr(tx.sdata, hdr->addr2, + NULL)))) { I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); ieee80211_free_txskb(&local->hw, skb); goto begin; @@ -5061,6 +5098,8 @@ ieee80211_beacon_get_finish(struct ieee80211_hw *hw, rate_control_get_rate(sdata, NULL, &txrc); info->control.vif = vif; + info->control.flags |= u32_encode_bits(link->link_id, + IEEE80211_TX_CTRL_MLO_LINK); info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_ASSIGN_SEQ 
| IEEE80211_TX_CTL_FIRST_FRAGMENT; @@ -5885,6 +5924,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, rcu_read_lock(); err = ieee80211_lookup_ra_sta(sdata, skb, &sta); if (err) { + dev_kfree_skb(skb); rcu_read_unlock(); return err; } @@ -5899,7 +5939,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, * for MLO STA, the SA should be the AP MLD address, but * the link ID has been selected already */ - if (sta->sta.mlo) + if (sta && sta->sta.mlo) memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN); } rcu_read_unlock(); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 53826c663723..3359ab332d7d 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -954,9 +954,11 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_queue_delayed_work); -static void ieee80211_parse_extension_element(u32 *crc, - const struct element *elem, - struct ieee802_11_elems *elems) +static void +ieee80211_parse_extension_element(u32 *crc, + const struct element *elem, + struct ieee802_11_elems *elems, + struct ieee80211_elems_parse_params *params) { const void *data = elem->data + 1; u8 len; @@ -1013,7 +1015,8 @@ static void ieee80211_parse_extension_element(u32 *crc, break; case WLAN_EID_EXT_EHT_CAPABILITY: if (ieee80211_eht_capa_size_ok(elems->he_cap, - data, len)) { + data, len, + params->from_ap)) { elems->eht_cap = data; elems->eht_cap_len = len; } @@ -1385,7 +1388,7 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, case WLAN_EID_EXTENSION: ieee80211_parse_extension_element(calc_crc ? &crc : NULL, - elem, elems); + elem, elems, params); break; case WLAN_EID_S1G_CAPABILITIES: if (elen >= sizeof(*elems->s1g_capab)) @@ -2025,7 +2028,8 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata, cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band), IEEE80211_CHAN_NO_HE | IEEE80211_CHAN_NO_EHT)) { - pos = ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, end); + pos = ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, end, + sdata->vif.type == NL80211_IFTYPE_AP); if (!pos) goto out_err; } @@ -2526,7 +2530,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (link) ieee80211_assign_chanctx(local, sdata, link); } - sdata_unlock(sdata); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: @@ -2545,6 +2548,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) &sdata->deflink.tx_conf[i]); break; } + sdata_unlock(sdata); /* common change flags for all interface types */ changed = BSS_CHANGED_ERP_CTS_PROT | @@ -2653,23 +2657,21 @@ int ieee80211_reconfig(struct ieee80211_local *local) } /* APs are now beaconing, add back stations */ - mutex_lock(&local->sta_mtx); - list_for_each_entry(sta, &local->sta_list, list) { - enum ieee80211_sta_state state; - - if (!sta->uploaded) - continue; - - if (sta->sdata->vif.type != NL80211_IFTYPE_AP && - sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN) + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) continue; - for (state = IEEE80211_STA_NOTEXIST; - state < sta->sta_state; state++) - WARN_ON(drv_sta_state(local, sta->sdata, sta, state, - state + 1)); + sdata_lock(sdata); + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_AP: + ieee80211_reconfig_stations(sdata); + break; + default: + break; + } + sdata_unlock(sdata); } - mutex_unlock(&local->sta_mtx); /* add back keys */ list_for_each_entry(sdata, &local->interfaces, list) @@ -3080,6 
+3082,7 @@ end: } void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps_mode, struct sk_buff *skb) { struct ieee80211_supported_band *sband; @@ -3106,7 +3109,7 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata, cap = le16_to_cpu(iftd->he_6ghz_capa.capa); cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS; - switch (sdata->deflink.smps_mode) { + switch (smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); @@ -3507,8 +3510,7 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info, return true; } -void ieee80211_chandef_eht_oper(struct ieee80211_sub_if_data *sdata, - const struct ieee80211_eht_operation *eht_oper, +void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation *eht_oper, bool support_160, bool support_320, struct cfg80211_chan_def *chandef) { @@ -3684,7 +3686,7 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata, support_320 = eht_phy_cap & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; - ieee80211_chandef_eht_oper(sdata, eht_oper, support_160, + ieee80211_chandef_eht_oper(eht_oper, support_160, support_320, &he_chandef); } @@ -4770,6 +4772,7 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype) const struct ieee80211_sta_he_cap *he_cap; const struct ieee80211_sta_eht_cap *eht_cap; struct ieee80211_supported_band *sband; + bool is_ap; u8 n; sband = ieee80211_get_sband(sdata); @@ -4781,8 +4784,12 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype) if (!he_cap || !eht_cap) return 0; + is_ap = iftype == NL80211_IFTYPE_AP || + iftype == NL80211_IFTYPE_P2P_GO; + n = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, - &eht_cap->eht_cap_elem); + &eht_cap->eht_cap_elem, + is_ap); return 2 + 1 + sizeof(he_cap->he_cap_elem) + n + ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], @@ -4793,7 +4800,8 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype) u8 *ieee80211_ie_build_eht_cap(u8 *pos, const struct ieee80211_sta_he_cap *he_cap, const struct ieee80211_sta_eht_cap *eht_cap, - u8 *end) + u8 *end, + bool for_ap) { u8 mcs_nss_len, ppet_len; u8 ie_len; @@ -4804,7 +4812,8 @@ u8 *ieee80211_ie_build_eht_cap(u8 *pos, return orig_pos; mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, - &eht_cap->eht_cap_elem); + &eht_cap->eht_cap_elem, + for_ap); ppet_len = ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], eht_cap->eht_cap_elem.phy_cap_info); diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 93ec2f349748..20f742b5503b 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -351,7 +351,7 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad) * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ put_unaligned_be16(len_a, &aad[0]); put_unaligned(mask_fc, (__le16 *)&aad[2]); - memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN); + memcpy(&aad[4], &hdr->addrs, 3 * ETH_ALEN); /* Mask Seq#, leave Frag# */ aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; @@ -792,7 +792,7 @@ static void bip_aad(struct sk_buff *skb, u8 *aad) IEEE80211_FCTL_MOREDATA); put_unaligned(mask_fc, (__le16 *) &aad[0]); /* A1 || A2 || A3 */ - memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN); + memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN); } diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index b8ce84618a55..c439125ef2b9 100644 --- a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -44,7 +44,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, switch (mac_cb(skb)->dest.mode) { case IEEE802154_ADDR_NONE: - if 
(mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE) + if (hdr->source.mode != IEEE802154_ADDR_NONE) /* FIXME: check if we are PAN coordinator */ skb->pkt_type = PACKET_OTHERHOST; else diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 35b5f806fdda..b52afe316dc4 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev, p = per_cpu_ptr(mdev->stats, i); do { - start = u64_stats_fetch_begin(&p->syncp); + start = u64_stats_fetch_begin_irq(&p->syncp); local = p->stats; - } while (u64_stats_fetch_retry(&p->syncp, start)); + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); stats->rx_packets += local.rx_packets; stats->rx_bytes += local.rx_bytes; diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 291b5da42fdb..9813ed0fde9b 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -796,7 +796,7 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, u8 rm_id = rm_list->ids[i]; bool removed = false; - list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { + mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); int how = RCV_SHUTDOWN | SEND_SHUTDOWN; u8 id = subflow->local_id; @@ -1327,7 +1327,7 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } - entry = kmalloc(sizeof(*entry), GFP_KERNEL); + entry = kmalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT); if (!entry) { GENL_SET_ERR_MSG(info, "can't allocate addr"); return -ENOMEM; @@ -2218,17 +2218,17 @@ static const struct genl_small_ops mptcp_pm_ops[] = { { .cmd = MPTCP_PM_CMD_ADD_ADDR, .doit = mptcp_nl_cmd_add_addr, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MPTCP_PM_CMD_DEL_ADDR, .doit = mptcp_nl_cmd_del_addr, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MPTCP_PM_CMD_FLUSH_ADDRS, .doit = mptcp_nl_cmd_flush_addrs, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MPTCP_PM_CMD_GET_ADDR, @@ -2238,7 +2238,7 @@ static const struct genl_small_ops mptcp_pm_ops[] = { { .cmd = MPTCP_PM_CMD_SET_LIMITS, .doit = mptcp_nl_cmd_set_limits, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MPTCP_PM_CMD_GET_LIMITS, @@ -2247,27 +2247,27 @@ static const struct genl_small_ops mptcp_pm_ops[] = { { .cmd = MPTCP_PM_CMD_SET_FLAGS, .doit = mptcp_nl_cmd_set_flags, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MPTCP_PM_CMD_ANNOUNCE, .doit = mptcp_nl_cmd_announce, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MPTCP_PM_CMD_REMOVE, .doit = mptcp_nl_cmd_remove, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MPTCP_PM_CMD_SUBFLOW_CREATE, .doit = mptcp_nl_cmd_sf_create, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MPTCP_PM_CMD_SUBFLOW_DESTROY, .doit = mptcp_nl_cmd_sf_destroy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, }; @@ -2280,6 +2280,7 @@ static struct genl_family mptcp_genl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = mptcp_pm_ops, .n_small_ops = ARRAY_SIZE(mptcp_pm_ops), + .resv_start_op = MPTCP_PM_CMD_SUBFLOW_DESTROY + 1, .mcgrps = mptcp_pm_mcgrps, .n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps), }; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index da4257504fad..47931f6cf387 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1263,7 +1263,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, 
i = skb_shinfo(skb)->nr_frags; can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset); - if (!can_coalesce && i >= sysctl_max_skb_frags) { + if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) { tcp_mark_push(tcp_sk(ssk), skb); goto alloc_skb; } @@ -1538,8 +1538,9 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) struct mptcp_sendmsg_info info = { .flags = flags, }; + bool do_check_data_fin = false; struct mptcp_data_frag *dfrag; - int len, copied = 0; + int len; while ((dfrag = mptcp_send_head(sk))) { info.sent = dfrag->already_sent; @@ -1574,8 +1575,8 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) goto out; } + do_check_data_fin = true; info.sent += ret; - copied += ret; len -= ret; mptcp_update_post_push(msk, dfrag, ret); @@ -1591,7 +1592,7 @@ out: /* ensure the rtx timer is running */ if (!mptcp_timer_pending(sk)) mptcp_reset_timer(sk); - if (copied) + if (do_check_data_fin) __mptcp_check_send_data_fin(sk); } @@ -2357,7 +2358,7 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk) might_sleep(); - list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { + mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); if (inet_sk_state_load(ssk) != TCP_CLOSE) @@ -2400,7 +2401,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk) mptcp_token_destroy(msk); - list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { + mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); bool slow; @@ -3047,7 +3048,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) __mptcp_clear_xmit(sk); /* join list will be eventually flushed (with rst) at sock lock release time */ - list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) + mptcp_for_each_subflow_safe(msk, subflow, tmp) __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); /* move to sk_receive_queue, sk_stream_kill_queues will purge it */ diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 132d50833df1..c1b12318535d 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -314,6 +314,8 @@ struct mptcp_sock { #define mptcp_for_each_subflow(__msk, __subflow) \ list_for_each_entry(__subflow, &((__msk)->conn_list), node) +#define mptcp_for_each_subflow_safe(__msk, __subflow, __tmp) \ + list_for_each_entry_safe(__subflow, __tmp, &((__msk)->conn_list), node) static inline void msk_owned_by_me(const struct mptcp_sock *msk) { diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index c189b4c8a182..d27f4eccce6d 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c @@ -768,6 +768,7 @@ static struct genl_family ncsi_genl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = ncsi_ops, .n_small_ops = ARRAY_SIZE(ncsi_ops), + .resv_start_op = NCSI_CMD_SET_CHANNEL_MASK + 1, }; static int __init ncsi_init_netlink(void) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 22f15ebf6045..4b8d04640ff3 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -144,7 +144,6 @@ config NF_CONNTRACK_ZONES config NF_CONNTRACK_PROCFS bool "Supply CT list in procfs (OBSOLETE)" - default y depends on PROC_FS help This option enables for the list of known conntrack entries diff --git a/net/netfilter/core.c b/net/netfilter/core.c index dcf752b55a52..5a6705a0e4ec 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -300,12 +300,6 @@ nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum, 
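The protocol.h hunk above introduces mptcp_for_each_subflow_safe() as a thin wrapper around list_for_each_entry_safe(), so callers that close and unlink subflows while walking msk->conn_list never dereference a just-freed node. A minimal sketch of the same wrapper idea, assuming a hypothetical struct foo_ctx list (not code from this series):

#include <linux/list.h>
#include <linux/slab.h>

struct foo_ctx {
	struct list_head node;
};

/* Wrapper mirroring the mptcp_for_each_subflow_safe() shape for a generic list. */
#define foo_for_each_entry_safe(__head, __pos, __tmp) \
	list_for_each_entry_safe(__pos, __tmp, __head, node)

static void foo_flush(struct list_head *head)
{
	struct foo_ctx *pos, *tmp;

	foo_for_each_entry_safe(head, pos, tmp) {
		/* safe to unlink and free: __tmp already caches the next entry */
		list_del(&pos->node);
		kfree(pos);
	}
}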
if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum)) return NULL; return net->nf.hooks_ipv6 + hooknum; -#if IS_ENABLED(CONFIG_DECNET) - case NFPROTO_DECNET: - if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum)) - return NULL; - return net->nf.hooks_decnet + hooknum; -#endif default: WARN_ON_ONCE(1); return NULL; @@ -750,10 +744,6 @@ static int __net_init netfilter_net_init(struct net *net) #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE __netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge)); #endif -#if IS_ENABLED(CONFIG_DECNET) - __netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet)); -#endif - #ifdef CONFIG_PROC_FS net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter", net->proc_net); diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 16ae92054baa..e7ba5b6dd2b7 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -353,7 +353,7 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); if (unlikely(!c)) return; - strlcpy(c->str, ext->comment, len + 1); + strscpy(c->str, ext->comment, len + 1); set->ext_size += sizeof(*c) + strlen(c->str) + 1; rcu_assign_pointer(comment->c, c); } @@ -1072,7 +1072,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info, if (!set) return -ENOMEM; spin_lock_init(&set->lock); - strlcpy(set->name, name, IPSET_MAXNAMELEN); + strscpy(set->name, name, IPSET_MAXNAMELEN); set->family = family; set->revision = revision; @@ -1719,11 +1719,13 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb, skb2 = nlmsg_new(payload, GFP_KERNEL); if (!skb2) return -ENOMEM; - rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid, - nlh->nlmsg_seq, NLMSG_ERROR, payload, 0); + rep = nlmsg_put(skb2, NETLINK_CB(skb).portid, + nlh->nlmsg_seq, NLMSG_ERROR, payload, 0); errmsg = nlmsg_data(rep); errmsg->error = ret; - memcpy(&errmsg->msg, nlh, nlh->nlmsg_len); + unsafe_memcpy(&errmsg->msg, nlh, nlh->nlmsg_len, + /* Bounds checked by the skb layer. 
*/); + cmdattr = (void *)&errmsg->msg + min_len; ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr, diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index efab2b06d373..988222fff9f0 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2611,7 +2611,7 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) dst->addr = src->addr.ip; dst->port = src->port; dst->fwmark = src->fwmark; - strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name)); + strscpy(dst->sched_name, sched_name, sizeof(dst->sched_name)); dst->flags = src->flags; dst->timeout = src->timeout / HZ; dst->netmask = src->netmask; @@ -2805,13 +2805,13 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) mutex_lock(&ipvs->sync_mutex); if (ipvs->sync_state & IP_VS_STATE_MASTER) { d[0].state = IP_VS_STATE_MASTER; - strlcpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn, + strscpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn, sizeof(d[0].mcast_ifn)); d[0].syncid = ipvs->mcfg.syncid; } if (ipvs->sync_state & IP_VS_STATE_BACKUP) { d[1].state = IP_VS_STATE_BACKUP; - strlcpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn, + strscpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn, sizeof(d[1].mcast_ifn)); d[1].syncid = ipvs->bcfg.syncid; } @@ -3561,7 +3561,7 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs) attrs[IPVS_DAEMON_ATTR_MCAST_IFN] && attrs[IPVS_DAEMON_ATTR_SYNC_ID])) return -EINVAL; - strlcpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), + strscpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), sizeof(c.mcast_ifn)); c.syncid = nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]); @@ -4005,6 +4005,7 @@ static struct genl_family ip_vs_genl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = ip_vs_genl_ops, .n_small_ops = ARRAY_SIZE(ip_vs_genl_ops), + .resv_start_op = IPVS_CMD_FLUSH + 1, }; static int __init ip_vs_genl_register(void) diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 9d43277b8b4f..a56fd0b5a430 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1280,12 +1280,12 @@ static void set_sock_size(struct sock *sk, int mode, int val) lock_sock(sk); if (mode) { val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2, - sysctl_wmem_max); + READ_ONCE(sysctl_wmem_max)); sk->sk_sndbuf = val * 2; sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } else { val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2, - sysctl_rmem_max); + READ_ONCE(sysctl_rmem_max)); sk->sk_rcvbuf = val * 2; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 71c2f4f95d36..c5851e1321e7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1782,7 +1782,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, } spin_unlock_bh(&nf_conntrack_expect_lock); } - if (!exp) + if (!exp && tmpl) __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); /* Other CPU might have obtained a pointer to this object before it was @@ -2068,10 +2068,6 @@ void nf_conntrack_alter_reply(struct nf_conn *ct, ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; if (ct->master || (help && !hlist_empty(&help->expectations))) return; - - rcu_read_lock(); - __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); - rcu_read_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); @@ -2797,7 +2793,6 @@ int nf_conntrack_init_net(struct net *net) nf_conntrack_acct_pernet_init(net); nf_conntrack_tstamp_pernet_init(net); 
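Several hunks in this range (sysctl_max_skb_frags in MPTCP, sysctl_wmem_max/sysctl_rmem_max in the IPVS sync code) wrap lockless sysctl reads in READ_ONCE(). The knob can be rewritten through /proc at any time, so the reader takes one tear-free snapshot instead of re-reading a value that may change under it. A minimal sketch of the pattern, with a hypothetical sysctl_foo_max tunable (not from this series):

#include <linux/compiler.h>
#include <linux/minmax.h>
#include <net/sock.h>

static int sysctl_foo_max __read_mostly = 212992;	/* hypothetical tunable */

static int foo_clamp_sndbuf(int requested)
{
	/* single snapshot; a concurrent sysctl write cannot tear the load */
	int max = READ_ONCE(sysctl_foo_max);

	return clamp_t(int, requested, SOCK_MIN_SNDBUF, max);
}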
nf_conntrack_ecache_pernet_init(net); - nf_conntrack_helper_pernet_init(net); nf_conntrack_proto_pernet_init(net); return 0; @@ -2807,10 +2802,6 @@ err_expect: return ret; } -#if (IS_BUILTIN(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \ - (IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES) || \ - IS_ENABLED(CONFIG_NF_CT_NETLINK)) - /* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */ int __nf_ct_change_timeout(struct nf_conn *ct, u64 timeout) @@ -2866,5 +2857,3 @@ int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status) return 0; } EXPORT_SYMBOL_GPL(nf_ct_change_status_common); - -#endif diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index a414274338cf..0d9332e9cf71 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -34,11 +34,6 @@ MODULE_DESCRIPTION("ftp connection tracking helper"); MODULE_ALIAS("ip_conntrack_ftp"); MODULE_ALIAS_NFCT_HELPER(HELPER_NAME); -/* This is slow, but it's simple. --RR */ -static char *ftp_buffer; - -static DEFINE_SPINLOCK(nf_ftp_lock); - #define MAX_PORTS 8 static u_int16_t ports[MAX_PORTS]; static unsigned int ports_c; @@ -398,6 +393,9 @@ static int help(struct sk_buff *skb, return NF_ACCEPT; } + if (unlikely(skb_linearize(skb))) + return NF_DROP; + th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph); if (th == NULL) return NF_ACCEPT; @@ -411,12 +409,8 @@ static int help(struct sk_buff *skb, } datalen = skb->len - dataoff; - spin_lock_bh(&nf_ftp_lock); - fb_ptr = skb_header_pointer(skb, dataoff, datalen, ftp_buffer); - if (!fb_ptr) { - spin_unlock_bh(&nf_ftp_lock); - return NF_ACCEPT; - } + spin_lock_bh(&ct->lock); + fb_ptr = skb->data + dataoff; ends_in_nl = (fb_ptr[datalen - 1] == '\n'); seq = ntohl(th->seq) + datalen; @@ -544,7 +538,7 @@ out_update_nl: if (ends_in_nl) update_nl_seq(ct, seq, ct_ftp_info, dir, skb); out: - spin_unlock_bh(&nf_ftp_lock); + spin_unlock_bh(&ct->lock); return ret; } @@ -571,7 +565,6 @@ static const struct nf_conntrack_expect_policy ftp_exp_policy = { static void __exit nf_conntrack_ftp_fini(void) { nf_conntrack_helpers_unregister(ftp, ports_c * 2); - kfree(ftp_buffer); } static int __init nf_conntrack_ftp_init(void) @@ -580,10 +573,6 @@ static int __init nf_conntrack_ftp_init(void) NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_ftp_master)); - ftp_buffer = kmalloc(65536, GFP_KERNEL); - if (!ftp_buffer) - return -ENOMEM; - if (ports_c == 0) ports[ports_c++] = FTP_PORT; @@ -603,7 +592,6 @@ static int __init nf_conntrack_ftp_init(void) ret = nf_conntrack_helpers_register(ftp, ports_c * 2); if (ret < 0) { pr_err("failed to register helpers\n"); - kfree(ftp_buffer); return ret; } diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index bb76305bb7ff..5a9bce24f3c3 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -34,6 +34,8 @@ #include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_h323.h> +#define H323_MAX_SIZE 65535 + /* Parameters */ static unsigned int default_rrq_ttl __read_mostly = 300; module_param(default_rrq_ttl, uint, 0600); @@ -86,6 +88,9 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff, if (tcpdatalen <= 0) /* No TCP data */ goto clear_out; + if (tcpdatalen > H323_MAX_SIZE) + tcpdatalen = H323_MAX_SIZE; + if (*data == NULL) { /* first TPKT */ /* Get first TPKT pointer */ tpkt = skb_header_pointer(skb, tcpdataoff, tcpdatalen, @@ -1169,6 
+1174,9 @@ static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff, if (dataoff >= skb->len) return NULL; *datalen = skb->len - dataoff; + if (*datalen > H323_MAX_SIZE) + *datalen = H323_MAX_SIZE; + return skb_header_pointer(skb, dataoff, *datalen, h323_buffer); } @@ -1770,7 +1778,7 @@ static int __init nf_conntrack_h323_init(void) NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_h323_master)); - h323_buffer = kmalloc(65536, GFP_KERNEL); + h323_buffer = kmalloc(H323_MAX_SIZE + 1, GFP_KERNEL); if (!h323_buffer) return -ENOMEM; ret = h323_helper_init(); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index e96b32221444..ff737a76052e 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -35,11 +35,6 @@ unsigned int nf_ct_helper_hsize __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_helper_hsize); static unsigned int nf_ct_helper_count __read_mostly; -static bool nf_ct_auto_assign_helper __read_mostly = false; -module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644); -MODULE_PARM_DESC(nf_conntrack_helper, - "Enable automatic conntrack helper assignment (default 0)"); - static DEFINE_MUTEX(nf_ct_nat_helpers_mutex); static struct list_head nf_ct_nat_helpers __read_mostly; @@ -51,24 +46,6 @@ static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple) (__force __u16)tuple->src.u.all) % nf_ct_helper_hsize; } -static struct nf_conntrack_helper * -__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple) -{ - struct nf_conntrack_helper *helper; - struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; - unsigned int h; - - if (!nf_ct_helper_count) - return NULL; - - h = helper_hash(tuple); - hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) { - if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask)) - return helper; - } - return NULL; -} - struct nf_conntrack_helper * __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) { @@ -209,33 +186,11 @@ nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) } EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add); -static struct nf_conntrack_helper * -nf_ct_lookup_helper(struct nf_conn *ct, struct net *net) -{ - struct nf_conntrack_net *cnet = nf_ct_pernet(net); - - if (!cnet->sysctl_auto_assign_helper) { - if (cnet->auto_assign_helper_warned) - return NULL; - if (!__nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple)) - return NULL; - pr_info("nf_conntrack: default automatic helper assignment " - "has been turned off for security reasons and CT-based " - "firewall rule not found. Use the iptables CT target " - "to attach helpers instead.\n"); - cnet->auto_assign_helper_warned = true; - return NULL; - } - - return __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); -} - int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, gfp_t flags) { struct nf_conntrack_helper *helper = NULL; struct nf_conn_help *help; - struct net *net = nf_ct_net(ct); /* We already got a helper explicitly attached. 
The function * nf_conntrack_alter_reply - in case NAT is in use - asks for looking @@ -246,23 +201,21 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, if (test_bit(IPS_HELPER_BIT, &ct->status)) return 0; - if (tmpl != NULL) { - help = nfct_help(tmpl); - if (help != NULL) { - helper = rcu_dereference(help->helper); - set_bit(IPS_HELPER_BIT, &ct->status); - } + if (WARN_ON_ONCE(!tmpl)) + return 0; + + help = nfct_help(tmpl); + if (help != NULL) { + helper = rcu_dereference(help->helper); + set_bit(IPS_HELPER_BIT, &ct->status); } help = nfct_help(ct); if (helper == NULL) { - helper = nf_ct_lookup_helper(ct, net); - if (helper == NULL) { - if (help) - RCU_INIT_POINTER(help->helper, NULL); - return 0; - } + if (help) + RCU_INIT_POINTER(help->helper, NULL); + return 0; } if (help == NULL) { @@ -545,19 +498,6 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat) } EXPORT_SYMBOL_GPL(nf_nat_helper_unregister); -void nf_ct_set_auto_assign_helper_warned(struct net *net) -{ - nf_ct_pernet(net)->auto_assign_helper_warned = true; -} -EXPORT_SYMBOL_GPL(nf_ct_set_auto_assign_helper_warned); - -void nf_conntrack_helper_pernet_init(struct net *net) -{ - struct nf_conntrack_net *cnet = nf_ct_pernet(net); - - cnet->sysctl_auto_assign_helper = nf_ct_auto_assign_helper; -} - int nf_conntrack_helper_init(void) { nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 08ee4e760a3d..992decbcaa5c 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -39,6 +39,7 @@ unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, EXPORT_SYMBOL_GPL(nf_nat_irc_hook); #define HELPER_NAME "irc" +#define MAX_SEARCH_SIZE 4095 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("IRC (DCC) connection tracking helper"); @@ -121,6 +122,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, int i, ret = NF_ACCEPT; char *addr_beg_p, *addr_end_p; typeof(nf_nat_irc_hook) nf_nat_irc; + unsigned int datalen; /* If packet is coming from IRC server */ if (dir == IP_CT_DIR_REPLY) @@ -140,8 +142,12 @@ static int help(struct sk_buff *skb, unsigned int protoff, if (dataoff >= skb->len) return NF_ACCEPT; + datalen = skb->len - dataoff; + if (datalen > MAX_SEARCH_SIZE) + datalen = MAX_SEARCH_SIZE; + spin_lock_bh(&irc_buffer_lock); - ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff, + ib_ptr = skb_header_pointer(skb, dataoff, datalen, irc_buffer); if (!ib_ptr) { spin_unlock_bh(&irc_buffer_lock); @@ -149,7 +155,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, } data = ib_ptr; - data_limit = ib_ptr + skb->len - dataoff; + data_limit = ib_ptr + datalen; /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */ @@ -188,8 +194,9 @@ static int help(struct sk_buff *skb, unsigned int protoff, /* dcc_ip can be the internal OR external (NAT'ed) IP */ tuple = &ct->tuplehash[dir].tuple; - if (tuple->src.u3.ip != dcc_ip && - tuple->dst.u3.ip != dcc_ip) { + if ((tuple->src.u3.ip != dcc_ip && + ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) || + dcc_port == 0) { net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n", &tuple->src.u3.ip, &dcc_ip, dcc_port); @@ -251,7 +258,7 @@ static int __init nf_conntrack_irc_init(void) irc_exp_policy.max_expected = max_dcc_channels; irc_exp_policy.timeout = dcc_timeout; - irc_buffer = kmalloc(65536, GFP_KERNEL); + irc_buffer = kmalloc(MAX_SEARCH_SIZE + 1, GFP_KERNEL); 
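The IRC helper hunk just above caps the searched payload at MAX_SEARCH_SIZE and shrinks the scratch buffer to match, so skb_header_pointer() can never be asked to copy more than the buffer holds. A minimal sketch of that bounded-read pattern follows; MY_SEARCH_MAX, scratch and my_parse() are hypothetical, and serialization of the shared buffer (a spinlock in the real helpers) is omitted for brevity.

#include <linux/netfilter.h>
#include <linux/skbuff.h>

#define MY_SEARCH_MAX	4095

static char scratch[MY_SEARCH_MAX + 1];		/* room for a trailing NUL */

static int my_parse(const char *data, unsigned int len)
{
	/* hypothetical parser; the real helpers scan for DCC/PORT commands here */
	return NF_ACCEPT;
}

static int my_help(struct sk_buff *skb, unsigned int dataoff)
{
	unsigned int datalen = skb->len - dataoff;
	const char *data;

	if (datalen > MY_SEARCH_MAX)
		datalen = MY_SEARCH_MAX;	/* never copy more than the buffer holds */

	data = skb_header_pointer(skb, dataoff, datalen, scratch);
	if (!data)
		return NF_ACCEPT;		/* defensive: out-of-range offset/length */

	return my_parse(data, datalen);
}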
if (!irc_buffer) return -ENOMEM; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 04169b54f2a2..7562b215b932 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2298,11 +2298,6 @@ ctnetlink_create_conntrack(struct net *net, ct->status |= IPS_HELPER; RCU_INIT_POINTER(help->helper, helper); } - } else { - /* try an implicit helper assignation */ - err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); - if (err < 0) - goto err2; } err = ctnetlink_setup_nat(ct, cda); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index a63b51dceaf2..656631083177 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -47,6 +47,12 @@ static const char *const tcp_conntrack_names[] = { "SYN_SENT2", }; +enum nf_ct_tcp_action { + NFCT_TCP_IGNORE, + NFCT_TCP_INVALID, + NFCT_TCP_ACCEPT, +}; + #define SECS * HZ #define MINS * 60 SECS #define HOURS * 60 MINS @@ -472,23 +478,45 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender, } } -static bool tcp_in_window(struct nf_conn *ct, - enum ip_conntrack_dir dir, - unsigned int index, - const struct sk_buff *skb, - unsigned int dataoff, - const struct tcphdr *tcph, - const struct nf_hook_state *hook_state) +__printf(6, 7) +static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb, + const struct nf_conn *ct, + const struct nf_hook_state *state, + const struct ip_ct_tcp_state *sender, + enum nf_ct_tcp_action ret, + const char *fmt, ...) +{ + const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct)); + struct va_format vaf; + va_list args; + bool be_liberal; + + be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal; + if (be_liberal) + return NFCT_TCP_ACCEPT; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf); + va_end(args); + + return ret; +} + +static enum nf_ct_tcp_action +tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir, + unsigned int index, const struct sk_buff *skb, + unsigned int dataoff, const struct tcphdr *tcph, + const struct nf_hook_state *hook_state) { struct ip_ct_tcp *state = &ct->proto.tcp; - struct net *net = nf_ct_net(ct); - struct nf_tcp_net *tn = nf_tcp_pernet(net); struct ip_ct_tcp_state *sender = &state->seen[dir]; struct ip_ct_tcp_state *receiver = &state->seen[!dir]; __u32 seq, ack, sack, end, win, swin; - u16 win_raw; + bool in_recv_win, seq_ok; s32 receiver_offset; - bool res, in_recv_win; + u16 win_raw; /* * Get the required data from the packet. @@ -517,7 +545,7 @@ static bool tcp_in_window(struct nf_conn *ct, end, win); if (!tcph->ack) /* Simultaneous open */ - return true; + return NFCT_TCP_ACCEPT; } else { /* * We are in the middle of a connection, @@ -560,7 +588,7 @@ static bool tcp_in_window(struct nf_conn *ct, end, win); if (dir == IP_CT_DIR_REPLY && !tcph->ack) - return true; + return NFCT_TCP_ACCEPT; } if (!(tcph->ack)) { @@ -584,91 +612,166 @@ static bool tcp_in_window(struct nf_conn *ct, */ seq = end = sender->td_end; - /* Is the ending sequence in the receive window (if available)? */ - in_recv_win = !receiver->td_maxwin || - after(end, sender->td_end - receiver->td_maxwin - 1); - - if (before(seq, sender->td_maxend + 1) && - in_recv_win && - before(sack, receiver->td_end + 1) && - after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { - /* - * Take into account window scaling (RFC 1323). 
- */ - if (!tcph->syn) - win <<= sender->td_scale; - - /* - * Update sender data. - */ - swin = win + (sack - ack); - if (sender->td_maxwin < swin) - sender->td_maxwin = swin; - if (after(end, sender->td_end)) { + seq_ok = before(seq, sender->td_maxend + 1); + if (!seq_ok) { + u32 overshot = end - sender->td_maxend + 1; + bool ack_ok; + + ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1); + in_recv_win = receiver->td_maxwin && + after(end, sender->td_end - receiver->td_maxwin - 1); + + if (in_recv_win && + ack_ok && + overshot <= receiver->td_maxwin && + before(sack, receiver->td_end + 1)) { + /* Work around TCPs that send more bytes than allowed by + * the receive window. + * + * If the (marked as invalid) packet is allowed to pass by + * the ruleset and the peer acks this data, then its possible + * all future packets will trigger 'ACK is over upper bound' check. + * + * Thus if only the sequence check fails then do update td_end so + * possible ACK for this data can update internal state. + */ sender->td_end = end; sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; - } - if (tcph->ack) { - if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) { - sender->td_maxack = ack; - sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET; - } else if (after(ack, sender->td_maxack)) - sender->td_maxack = ack; + + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE, + "%u bytes more than expected", overshot); } - /* - * Update receiver data. - */ - if (receiver->td_maxwin != 0 && after(end, sender->td_maxend)) - receiver->td_maxwin += end - sender->td_maxend; - if (after(sack + win, receiver->td_maxend - 1)) { - receiver->td_maxend = sack + win; - if (win == 0) - receiver->td_maxend++; + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID, + "SEQ is over upper bound %u (over the window of the receiver)", + sender->td_maxend + 1); + } + + if (!before(sack, receiver->td_end + 1)) + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID, + "ACK is over upper bound %u (ACKed data not seen yet)", + receiver->td_end + 1); + + /* Is the ending sequence in the receive window (if available)? */ + in_recv_win = !receiver->td_maxwin || + after(end, sender->td_end - receiver->td_maxwin - 1); + if (!in_recv_win) + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE, + "SEQ is under lower bound %u (already ACKed data retransmitted)", + sender->td_end - receiver->td_maxwin - 1); + if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE, + "ignored ACK under lower bound %u (possible overly delayed)", + receiver->td_end - MAXACKWINDOW(sender) - 1); + + /* Take into account window scaling (RFC 1323). */ + if (!tcph->syn) + win <<= sender->td_scale; + + /* Update sender data. */ + swin = win + (sack - ack); + if (sender->td_maxwin < swin) + sender->td_maxwin = swin; + if (after(end, sender->td_end)) { + sender->td_end = end; + sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; + } + if (tcph->ack) { + if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) { + sender->td_maxack = ack; + sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET; + } else if (after(ack, sender->td_maxack)) { + sender->td_maxack = ack; } - if (ack == receiver->td_end) - receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; + } - /* - * Check retransmissions. 
- */ - if (index == TCP_ACK_SET) { - if (state->last_dir == dir - && state->last_seq == seq - && state->last_ack == ack - && state->last_end == end - && state->last_win == win_raw) - state->retrans++; - else { - state->last_dir = dir; - state->last_seq = seq; - state->last_ack = ack; - state->last_end = end; - state->last_win = win_raw; - state->retrans = 0; - } + /* Update receiver data. */ + if (receiver->td_maxwin != 0 && after(end, sender->td_maxend)) + receiver->td_maxwin += end - sender->td_maxend; + if (after(sack + win, receiver->td_maxend - 1)) { + receiver->td_maxend = sack + win; + if (win == 0) + receiver->td_maxend++; + } + if (ack == receiver->td_end) + receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; + + /* Check retransmissions. */ + if (index == TCP_ACK_SET) { + if (state->last_dir == dir && + state->last_seq == seq && + state->last_ack == ack && + state->last_end == end && + state->last_win == win_raw) { + state->retrans++; + } else { + state->last_dir = dir; + state->last_seq = seq; + state->last_ack = ack; + state->last_end = end; + state->last_win = win_raw; + state->retrans = 0; } - res = true; - } else { - res = false; - if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || - tn->tcp_be_liberal) - res = true; - if (!res) { + } + + return NFCT_TCP_ACCEPT; +} + +static void __cold nf_tcp_handle_invalid(struct nf_conn *ct, + enum ip_conntrack_dir dir, + int index, + const struct sk_buff *skb, + const struct nf_hook_state *hook_state) +{ + const unsigned int *timeouts; + const struct nf_tcp_net *tn; + unsigned int timeout; + u32 expires; + + if (!test_bit(IPS_ASSURED_BIT, &ct->status) || + test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) + return; + + /* We don't want to have connections hanging around in ESTABLISHED + * state for long time 'just because' conntrack deemed a FIN/RST + * out-of-window. + * + * Shrink the timeout just like when there is unacked data. + * This speeds up eviction of 'dead' connections where the + * connection and conntracks internal state are out of sync. + */ + switch (index) { + case TCP_RST_SET: + case TCP_FIN_SET: + break; + default: + return; + } + + if (ct->proto.tcp.last_dir != dir && + (ct->proto.tcp.last_index == TCP_FIN_SET || + ct->proto.tcp.last_index == TCP_RST_SET)) { + expires = nf_ct_expires(ct); + if (expires < 120 * HZ) + return; + + tn = nf_tcp_pernet(nf_ct_net(ct)); + timeouts = nf_ct_timeout_lookup(ct); + if (!timeouts) + timeouts = tn->timeouts; + + timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]); + if (expires > timeout) { nf_ct_l4proto_log_invalid(skb, ct, hook_state, - "%s", - before(seq, sender->td_maxend + 1) ? - in_recv_win ? - before(sack, receiver->td_end + 1) ? - after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? 
"BUG" - : "ACK is under the lower bound (possible overly delayed ACK)" - : "ACK is over the upper bound (ACKed data not seen yet)" - : "SEQ is under the lower bound (already ACKed data retransmitted)" - : "SEQ is over the upper bound (over the window of the receiver)"); + "packet (index %d, dir %d) response for index %d lower timeout to %u", + index, dir, ct->proto.tcp.last_index, timeout); + + WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp); } + } else { + ct->proto.tcp.last_index = index; + ct->proto.tcp.last_dir = dir; } - - return res; } /* table of valid flag combinations - PUSH, ECE and CWR are always valid */ @@ -830,6 +933,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, struct nf_conntrack_tuple *tuple; enum tcp_conntrack new_state, old_state; unsigned int index, *timeouts; + enum nf_ct_tcp_action res; enum ip_conntrack_dir dir; const struct tcphdr *th; struct tcphdr _tcph; @@ -1095,10 +1199,18 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, break; } - if (!tcp_in_window(ct, dir, index, - skb, dataoff, th, state)) { + res = tcp_in_window(ct, dir, index, + skb, dataoff, th, state); + switch (res) { + case NFCT_TCP_IGNORE: + spin_unlock_bh(&ct->lock); + return NF_ACCEPT; + case NFCT_TCP_INVALID: + nf_tcp_handle_invalid(ct, dir, index, skb, state); spin_unlock_bh(&ct->lock); return -NF_ACCEPT; + case NFCT_TCP_ACCEPT: + break; } in_window: /* From now on we have got in-window packets */ diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c index fcb33b1d5456..13dc421fc4f5 100644 --- a/net/netfilter/nf_conntrack_sane.c +++ b/net/netfilter/nf_conntrack_sane.c @@ -34,10 +34,6 @@ MODULE_AUTHOR("Michal Schmidt <mschmidt@redhat.com>"); MODULE_DESCRIPTION("SANE connection tracking helper"); MODULE_ALIAS_NFCT_HELPER(HELPER_NAME); -static char *sane_buffer; - -static DEFINE_SPINLOCK(nf_sane_lock); - #define MAX_PORTS 8 static u_int16_t ports[MAX_PORTS]; static unsigned int ports_c; @@ -67,14 +63,16 @@ static int help(struct sk_buff *skb, unsigned int dataoff, datalen; const struct tcphdr *th; struct tcphdr _tcph; - void *sb_ptr; int ret = NF_ACCEPT; int dir = CTINFO2DIR(ctinfo); struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct); struct nf_conntrack_expect *exp; struct nf_conntrack_tuple *tuple; - struct sane_request *req; struct sane_reply_net_start *reply; + union { + struct sane_request req; + struct sane_reply_net_start repl; + } buf; /* Until there's been traffic both ways, don't look in packets. */ if (ctinfo != IP_CT_ESTABLISHED && @@ -92,59 +90,62 @@ static int help(struct sk_buff *skb, return NF_ACCEPT; datalen = skb->len - dataoff; - - spin_lock_bh(&nf_sane_lock); - sb_ptr = skb_header_pointer(skb, dataoff, datalen, sane_buffer); - if (!sb_ptr) { - spin_unlock_bh(&nf_sane_lock); - return NF_ACCEPT; - } - if (dir == IP_CT_DIR_ORIGINAL) { + const struct sane_request *req; + if (datalen != sizeof(struct sane_request)) - goto out; + return NF_ACCEPT; + + req = skb_header_pointer(skb, dataoff, datalen, &buf.req); + if (!req) + return NF_ACCEPT; - req = sb_ptr; if (req->RPC_code != htonl(SANE_NET_START)) { /* Not an interesting command */ - ct_sane_info->state = SANE_STATE_NORMAL; - goto out; + WRITE_ONCE(ct_sane_info->state, SANE_STATE_NORMAL); + return NF_ACCEPT; } /* We're interested in the next reply */ - ct_sane_info->state = SANE_STATE_START_REQUESTED; - goto out; + WRITE_ONCE(ct_sane_info->state, SANE_STATE_START_REQUESTED); + return NF_ACCEPT; } + /* IP_CT_DIR_REPLY */ + /* Is it a reply to an uninteresting command? 
*/ - if (ct_sane_info->state != SANE_STATE_START_REQUESTED) - goto out; + if (READ_ONCE(ct_sane_info->state) != SANE_STATE_START_REQUESTED) + return NF_ACCEPT; /* It's a reply to SANE_NET_START. */ - ct_sane_info->state = SANE_STATE_NORMAL; + WRITE_ONCE(ct_sane_info->state, SANE_STATE_NORMAL); if (datalen < sizeof(struct sane_reply_net_start)) { pr_debug("NET_START reply too short\n"); - goto out; + return NF_ACCEPT; } - reply = sb_ptr; + datalen = sizeof(struct sane_reply_net_start); + + reply = skb_header_pointer(skb, dataoff, datalen, &buf.repl); + if (!reply) + return NF_ACCEPT; + if (reply->status != htonl(SANE_STATUS_SUCCESS)) { /* saned refused the command */ pr_debug("unsuccessful SANE_STATUS = %u\n", ntohl(reply->status)); - goto out; + return NF_ACCEPT; } /* Invalid saned reply? Ignore it. */ if (reply->zero != 0) - goto out; + return NF_ACCEPT; exp = nf_ct_expect_alloc(ct); if (exp == NULL) { nf_ct_helper_log(skb, ct, "cannot alloc expectation"); - ret = NF_DROP; - goto out; + return NF_DROP; } tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; @@ -162,9 +163,6 @@ static int help(struct sk_buff *skb, } nf_ct_expect_put(exp); - -out: - spin_unlock_bh(&nf_sane_lock); return ret; } @@ -178,7 +176,6 @@ static const struct nf_conntrack_expect_policy sane_exp_policy = { static void __exit nf_conntrack_sane_fini(void) { nf_conntrack_helpers_unregister(sane, ports_c * 2); - kfree(sane_buffer); } static int __init nf_conntrack_sane_init(void) @@ -187,10 +184,6 @@ static int __init nf_conntrack_sane_init(void) NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sane_master)); - sane_buffer = kmalloc(65536, GFP_KERNEL); - if (!sane_buffer) - return -ENOMEM; - if (ports_c == 0) ports[ports_c++] = SANE_PORT; @@ -210,7 +203,6 @@ static int __init nf_conntrack_sane_init(void) ret = nf_conntrack_helpers_register(sane, ports_c * 2); if (ret < 0) { pr_err("failed to register helpers\n"); - kfree(sane_buffer); return ret; } diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 05895878610c..4ffe84c5a82c 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -561,7 +561,6 @@ enum nf_ct_sysctl_index { NF_SYSCTL_CT_LOG_INVALID, NF_SYSCTL_CT_EXPECT_MAX, NF_SYSCTL_CT_ACCT, - NF_SYSCTL_CT_HELPER, #ifdef CONFIG_NF_CONNTRACK_EVENTS NF_SYSCTL_CT_EVENTS, #endif @@ -680,14 +679,6 @@ static struct ctl_table nf_ct_sysctl_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, - [NF_SYSCTL_CT_HELPER] = { - .procname = "nf_conntrack_helper", - .maxlen = sizeof(u8), - .mode = 0644, - .proc_handler = proc_dou8vec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, - }, #ifdef CONFIG_NF_CONNTRACK_EVENTS [NF_SYSCTL_CT_EVENTS] = { .procname = "nf_conntrack_events", @@ -1100,7 +1091,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct; - table[NF_SYSCTL_CT_HELPER].data = &cnet->sysctl_auto_assign_helper; #ifdef CONFIG_NF_CONNTRACK_EVENTS table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; #endif diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 765ac779bfc8..81c26a96c30b 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -437,12 +437,17 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table, } } +void 
nf_flow_table_gc_run(struct nf_flowtable *flow_table) +{ + nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL); +} + static void nf_flow_offload_work_gc(struct work_struct *work) { struct nf_flowtable *flow_table; flow_table = container_of(work, struct nf_flowtable, gc_work.work); - nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL); + nf_flow_table_gc_run(flow_table); queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ); } @@ -600,11 +605,11 @@ void nf_flow_table_free(struct nf_flowtable *flow_table) mutex_unlock(&flowtable_lock); cancel_delayed_work_sync(&flow_table->gc_work); - nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL); - nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL); nf_flow_table_offload_flush(flow_table); - if (nf_flowtable_hw_offload(flow_table)) - nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL); + /* ... no more pending work after this stage ... */ + nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL); + nf_flow_table_gc_run(flow_table); + nf_flow_table_offload_flush_cleanup(flow_table); rhashtable_destroy(&flow_table->rhashtable); } EXPORT_SYMBOL_GPL(nf_flow_table_free); diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index 103b6cbf257f..b04645ced89b 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -1074,6 +1074,14 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable, flow_offload_queue_work(offload); } +void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable) +{ + if (nf_flowtable_hw_offload(flowtable)) { + flush_workqueue(nf_flow_offload_del_wq); + nf_flow_table_gc_run(flowtable); + } +} + void nf_flow_table_offload_flush(struct nf_flowtable *flowtable) { if (nf_flowtable_hw_offload(flowtable)) { diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index edee7fa944c1..8a29290149bd 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -443,9 +443,9 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, mutex_lock(&nf_log_mutex); logger = nft_log_dereference(net->nf.nf_loggers[tindex]); if (!logger) - strlcpy(buf, "NONE", sizeof(buf)); + strscpy(buf, "NONE", sizeof(buf)); else - strlcpy(buf, logger->name, sizeof(buf)); + strscpy(buf, logger->name, sizeof(buf)); mutex_unlock(&nf_log_mutex); r = proc_dostring(&tmp, write, buffer, lenp, ppos); } diff --git a/net/netfilter/nf_nat_amanda.c b/net/netfilter/nf_nat_amanda.c index 3bc7e0854efe..98deef6cde69 100644 --- a/net/netfilter/nf_nat_amanda.c +++ b/net/netfilter/nf_nat_amanda.c @@ -44,19 +44,7 @@ static unsigned int help(struct sk_buff *skb, exp->expectfn = nf_nat_follow_master; /* Try to get same port: if not, try to change it. */ - for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { - int res; - - exp->tuple.dst.u.tcp.port = htons(port); - res = nf_ct_expect_related(exp, 0); - if (res == 0) - break; - else if (res != -EBUSY) { - port = 0; - break; - } - } - + port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port)); if (port == 0) { nf_ct_helper_log(skb, exp->master, "all ports in use"); return NF_DROP; diff --git a/net/netfilter/nf_nat_ftp.c b/net/netfilter/nf_nat_ftp.c index aace6768a64e..c92a436d9c48 100644 --- a/net/netfilter/nf_nat_ftp.c +++ b/net/netfilter/nf_nat_ftp.c @@ -86,22 +86,9 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb, * this one. 
*/ exp->expectfn = nf_nat_follow_master; - /* Try to get same port: if not, try to change it. */ - for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { - int ret; - - exp->tuple.dst.u.tcp.port = htons(port); - ret = nf_ct_expect_related(exp, 0); - if (ret == 0) - break; - else if (ret != -EBUSY) { - port = 0; - break; - } - } - + port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port)); if (port == 0) { - nf_ct_helper_log(skb, ct, "all ports in use"); + nf_ct_helper_log(skb, exp->master, "all ports in use"); return NF_DROP; } diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c index a263505455fc..a95a25196943 100644 --- a/net/netfilter/nf_nat_helper.c +++ b/net/netfilter/nf_nat_helper.c @@ -198,3 +198,34 @@ void nf_nat_follow_master(struct nf_conn *ct, nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); } EXPORT_SYMBOL(nf_nat_follow_master); + +u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port) +{ + static const unsigned int max_attempts = 128; + int range, attempts_left; + u16 min = port; + + range = USHRT_MAX - port; + attempts_left = range; + + if (attempts_left > max_attempts) + attempts_left = max_attempts; + + /* Try to get same port: if not, try to change it. */ + for (;;) { + int res; + + exp->tuple.dst.u.tcp.port = htons(port); + res = nf_ct_expect_related(exp, 0); + if (res == 0) + return port; + + if (res != -EBUSY || (--attempts_left < 0)) + break; + + port = min + prandom_u32_max(range); + } + + return 0; +} +EXPORT_SYMBOL_GPL(nf_nat_exp_find_port); diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c index c691ab8d234c..19c4fcc60c50 100644 --- a/net/netfilter/nf_nat_irc.c +++ b/net/netfilter/nf_nat_irc.c @@ -48,20 +48,8 @@ static unsigned int help(struct sk_buff *skb, exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; - /* Try to get same port: if not, try to change it. 
*/ - for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { - int ret; - - exp->tuple.dst.u.tcp.port = htons(port); - ret = nf_ct_expect_related(exp, 0); - if (ret == 0) - break; - else if (ret != -EBUSY) { - port = 0; - break; - } - } - + port = nf_nat_exp_find_port(exp, + ntohs(exp->saved_proto.tcp.port)); if (port == 0) { nf_ct_helper_log(skb, ct, "all ports in use"); return NF_DROP; diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c index f0a735e86851..cf4aeb299bde 100644 --- a/net/netfilter/nf_nat_sip.c +++ b/net/netfilter/nf_nat_sip.c @@ -410,19 +410,7 @@ static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff, exp->dir = !dir; exp->expectfn = nf_nat_sip_expected; - for (; port != 0; port++) { - int ret; - - exp->tuple.dst.u.udp.port = htons(port); - ret = nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER); - if (ret == 0) - break; - else if (ret != -EBUSY) { - port = 0; - break; - } - } - + port = nf_nat_exp_find_port(exp, port); if (port == 0) { nf_ct_helper_log(skb, ct, "all ports in use for SIP"); return NF_DROP; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 3cc88998b879..4de0a30d2009 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -32,7 +32,6 @@ static LIST_HEAD(nf_tables_objects); static LIST_HEAD(nf_tables_flowtables); static LIST_HEAD(nf_tables_destroy_list); static DEFINE_SPINLOCK(nf_tables_destroy_list_lock); -static u64 table_handle; enum { NFT_VALIDATE_SKIP = 0, @@ -743,7 +742,7 @@ __printf(2, 3) int nft_request_module(struct net *net, const char *fmt, return -ENOMEM; req->done = false; - strlcpy(req->module, module_name, MODULE_NAME_LEN); + strscpy(req->module, module_name, MODULE_NAME_LEN); list_add_tail(&req->list, &nft_net->module_list); return -EAGAIN; @@ -889,7 +888,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb, rcu_read_lock(); nft_net = nft_pernet(net); - cb->seq = nft_net->base_seq; + cb->seq = READ_ONCE(nft_net->base_seq); list_for_each_entry_rcu(table, &nft_net->tables, list) { if (family != NFPROTO_UNSPEC && family != table->family) @@ -1235,7 +1234,7 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info, INIT_LIST_HEAD(&table->flowtables); table->family = family; table->flags = flags; - table->handle = ++table_handle; + table->handle = ++nft_net->table_handle; if (table->flags & NFT_TABLE_F_OWNER) table->nlpid = NETLINK_CB(skb).portid; @@ -1705,7 +1704,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb, rcu_read_lock(); nft_net = nft_pernet(net); - cb->seq = nft_net->base_seq; + cb->seq = READ_ONCE(nft_net->base_seq); list_for_each_entry_rcu(table, &nft_net->tables, list) { if (family != NFPROTO_UNSPEC && family != table->family) @@ -2167,8 +2166,10 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, chain->flags |= NFT_CHAIN_BASE | flags; basechain->policy = NF_ACCEPT; if (chain->flags & NFT_CHAIN_HW_OFFLOAD && - !nft_chain_offload_support(basechain)) + !nft_chain_offload_support(basechain)) { + list_splice_init(&basechain->hook_list, &hook->list); return -EOPNOTSUPP; + } flow_block_init(&basechain->flow_block); @@ -2196,9 +2197,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, struct netlink_ext_ack *extack) { const struct nlattr * const *nla = ctx->nla; + struct nft_stats __percpu *stats = NULL; struct nft_table *table = ctx->table; struct nft_base_chain *basechain; - struct nft_stats __percpu *stats; struct net *net = ctx->net; char 
name[NFT_NAME_MAXLEN]; struct nft_rule_blob *blob; @@ -2236,7 +2237,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, return PTR_ERR(stats); } rcu_assign_pointer(basechain->stats, stats); - static_branch_inc(&nft_counters_enabled); } err = nft_basechain_init(basechain, family, &hook, flags); @@ -2319,6 +2319,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, goto err_unregister_hook; } + if (stats) + static_branch_inc(&nft_counters_enabled); + table->use++; return 0; @@ -2574,6 +2577,9 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info, nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla); if (chain != NULL) { + if (chain->flags & NFT_CHAIN_BINDING) + return -EINVAL; + if (info->nlh->nlmsg_flags & NLM_F_EXCL) { NL_SET_BAD_ATTR(extack, attr); return -EEXIST; @@ -3149,7 +3155,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb, rcu_read_lock(); nft_net = nft_pernet(net); - cb->seq = nft_net->base_seq; + cb->seq = READ_ONCE(nft_net->base_seq); list_for_each_entry_rcu(table, &nft_net->tables, list) { if (family != NFPROTO_UNSPEC && family != table->family) @@ -3907,7 +3913,7 @@ cont: list_for_each_entry(i, &ctx->table->sets, list) { int tmp; - if (!nft_is_active_next(ctx->net, set)) + if (!nft_is_active_next(ctx->net, i)) continue; if (!sscanf(i->name, name, &tmp)) continue; @@ -4133,7 +4139,7 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); nft_net = nft_pernet(net); - cb->seq = nft_net->base_seq; + cb->seq = READ_ONCE(nft_net->base_seq); list_for_each_entry_rcu(table, &nft_net->tables, list) { if (ctx->family != NFPROTO_UNSPEC && @@ -4451,6 +4457,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]); if (err < 0) return err; + + if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT)) + return -EINVAL; + } else if (flags & NFT_SET_CONCAT) { + return -EINVAL; } if (nla[NFTA_SET_EXPR] || nla[NFTA_SET_EXPRESSIONS]) @@ -5061,6 +5072,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); nft_net = nft_pernet(net); + cb->seq = READ_ONCE(nft_net->base_seq); + list_for_each_entry_rcu(table, &nft_net->tables, list) { if (dump_ctx->ctx.family != NFPROTO_UNSPEC && dump_ctx->ctx.family != table->family) @@ -5196,6 +5209,9 @@ static int nft_setelem_parse_flags(const struct nft_set *set, if (!(set->flags & NFT_SET_INTERVAL) && *flags & NFT_SET_ELEM_INTERVAL_END) return -EINVAL; + if ((*flags & (NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL)) == + (NFT_SET_ELEM_INTERVAL_END | NFT_SET_ELEM_CATCHALL)) + return -EINVAL; return 0; } @@ -5599,7 +5615,7 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set, err = nft_expr_clone(expr, set->exprs[i]); if (err < 0) { - nft_expr_destroy(ctx, expr); + kfree(expr); goto err_expr; } expr_array[i] = expr; @@ -5842,6 +5858,24 @@ static void nft_setelem_remove(const struct net *net, set->ops->remove(net, set, elem); } +static bool nft_setelem_valid_key_end(const struct nft_set *set, + struct nlattr **nla, u32 flags) +{ + if ((set->flags & (NFT_SET_CONCAT | NFT_SET_INTERVAL)) == + (NFT_SET_CONCAT | NFT_SET_INTERVAL)) { + if (flags & NFT_SET_ELEM_INTERVAL_END) + return false; + if (!nla[NFTA_SET_ELEM_KEY_END] && + !(flags & NFT_SET_ELEM_CATCHALL)) + return false; + } else { + if (nla[NFTA_SET_ELEM_KEY_END]) + return false; + } + + return true; +} + static 
int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, const struct nlattr *attr, u32 nlmsg_flags) { @@ -5892,6 +5926,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, return -EINVAL; } + if (set->flags & NFT_SET_OBJECT) { + if (!nla[NFTA_SET_ELEM_OBJREF] && + !(flags & NFT_SET_ELEM_INTERVAL_END)) + return -EINVAL; + } else { + if (nla[NFTA_SET_ELEM_OBJREF]) + return -EINVAL; + } + + if (!nft_setelem_valid_key_end(set, nla, flags)) + return -EINVAL; + if ((flags & NFT_SET_ELEM_INTERVAL_END) && (nla[NFTA_SET_ELEM_DATA] || nla[NFTA_SET_ELEM_OBJREF] || @@ -5899,6 +5945,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, nla[NFTA_SET_ELEM_EXPIRATION] || nla[NFTA_SET_ELEM_USERDATA] || nla[NFTA_SET_ELEM_EXPR] || + nla[NFTA_SET_ELEM_KEY_END] || nla[NFTA_SET_ELEM_EXPRESSIONS])) return -EINVAL; @@ -6029,10 +6076,6 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, } if (nla[NFTA_SET_ELEM_OBJREF] != NULL) { - if (!(set->flags & NFT_SET_OBJECT)) { - err = -EINVAL; - goto err_parse_key_end; - } obj = nft_obj_lookup(ctx->net, ctx->table, nla[NFTA_SET_ELEM_OBJREF], set->objtype, genmask); @@ -6325,6 +6368,9 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL)) return -EINVAL; + if (!nft_setelem_valid_key_end(set, nla, flags)) + return -EINVAL; + nft_set_ext_prepare(&tmpl); if (flags != 0) { @@ -6941,7 +6987,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); nft_net = nft_pernet(net); - cb->seq = nft_net->base_seq; + cb->seq = READ_ONCE(nft_net->base_seq); list_for_each_entry_rcu(table, &nft_net->tables, list) { if (family != NFPROTO_UNSPEC && family != table->family) @@ -7873,7 +7919,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb, rcu_read_lock(); nft_net = nft_pernet(net); - cb->seq = nft_net->base_seq; + cb->seq = READ_ONCE(nft_net->base_seq); list_for_each_entry_rcu(table, &nft_net->tables, list) { if (family != NFPROTO_UNSPEC && family != table->family) @@ -8806,6 +8852,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) struct nft_trans_elem *te; struct nft_chain *chain; struct nft_table *table; + unsigned int base_seq; LIST_HEAD(adl); int err; @@ -8855,9 +8902,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) * Bump generation counter, invalidate any dump in progress. * Cannot fail after this point. */ - while (++nft_net->base_seq == 0) + base_seq = READ_ONCE(nft_net->base_seq); + while (++base_seq == 0) ; + WRITE_ONCE(nft_net->base_seq, base_seq); + /* step 3. Start new generation, rules_gen_X now in use. 
*/ net->nft.gencursor = nft_gencursor_next(net); @@ -9419,13 +9469,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, break; } } - - cond_resched(); } list_for_each_entry(set, &ctx->table->sets, list) { - cond_resched(); - if (!nft_is_active_next(ctx->net, set)) continue; if (!(set->flags & NFT_SET_MAP) || @@ -9667,6 +9713,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, return PTR_ERR(chain); if (nft_is_base_chain(chain)) return -EOPNOTSUPP; + if (nft_chain_is_bound(chain)) + return -EINVAL; if (desc->flags & NFT_DATA_DESC_SETELEM && chain->flags & NFT_CHAIN_BINDING) return -EINVAL; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index c24b1240908f..9c44518cb70f 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -44,6 +44,10 @@ MODULE_DESCRIPTION("Netfilter messages via netlink socket"); static unsigned int nfnetlink_pernet_id __read_mostly; +#ifdef CONFIG_NF_CONNTRACK_EVENTS +static DEFINE_SPINLOCK(nfnl_grp_active_lock); +#endif + struct nfnl_net { struct sock *nfnl; }; @@ -654,6 +658,44 @@ static void nfnetlink_rcv(struct sk_buff *skb) netlink_rcv_skb(skb, nfnetlink_rcv_msg); } +static void nfnetlink_bind_event(struct net *net, unsigned int group) +{ +#ifdef CONFIG_NF_CONNTRACK_EVENTS + int type, group_bit; + u8 v; + + /* All NFNLGRP_CONNTRACK_* group bits fit into u8. + * The other groups are not relevant and can be ignored. + */ + if (group >= 8) + return; + + type = nfnl_group2type[group]; + + switch (type) { + case NFNL_SUBSYS_CTNETLINK: + break; + case NFNL_SUBSYS_CTNETLINK_EXP: + break; + default: + return; + } + + group_bit = (1 << group); + + spin_lock(&nfnl_grp_active_lock); + v = READ_ONCE(net->ct.ctnetlink_has_listener); + if ((v & group_bit) == 0) { + v |= group_bit; + + /* read concurrently without nfnl_grp_active_lock held. */ + WRITE_ONCE(net->ct.ctnetlink_has_listener, v); + } + + spin_unlock(&nfnl_grp_active_lock); +#endif +} + static int nfnetlink_bind(struct net *net, int group) { const struct nfnetlink_subsystem *ss; @@ -670,28 +712,45 @@ static int nfnetlink_bind(struct net *net, int group) if (!ss) request_module_nowait("nfnetlink-subsys-%d", type); -#ifdef CONFIG_NF_CONNTRACK_EVENTS - if (type == NFNL_SUBSYS_CTNETLINK) { - nfnl_lock(NFNL_SUBSYS_CTNETLINK); - WRITE_ONCE(net->ct.ctnetlink_has_listener, true); - nfnl_unlock(NFNL_SUBSYS_CTNETLINK); - } -#endif + nfnetlink_bind_event(net, group); return 0; } static void nfnetlink_unbind(struct net *net, int group) { #ifdef CONFIG_NF_CONNTRACK_EVENTS + int type, group_bit; + if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) return; - if (nfnl_group2type[group] == NFNL_SUBSYS_CTNETLINK) { - nfnl_lock(NFNL_SUBSYS_CTNETLINK); - if (!nfnetlink_has_listeners(net, group)) - WRITE_ONCE(net->ct.ctnetlink_has_listener, false); - nfnl_unlock(NFNL_SUBSYS_CTNETLINK); + type = nfnl_group2type[group]; + + switch (type) { + case NFNL_SUBSYS_CTNETLINK: + break; + case NFNL_SUBSYS_CTNETLINK_EXP: + break; + default: + return; + } + + /* ctnetlink_has_listener is u8 */ + if (group >= 8) + return; + + group_bit = (1 << group); + + spin_lock(&nfnl_grp_active_lock); + if (!nfnetlink_has_listeners(net, group)) { + u8 v = READ_ONCE(net->ct.ctnetlink_has_listener); + + v &= ~group_bit; + + /* read concurrently without nfnl_grp_active_lock held. 
*/ + WRITE_ONCE(net->ct.ctnetlink_has_listener, v); } + spin_unlock(&nfnl_grp_active_lock); #endif } diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c index 71e29adac48b..8120aadf6a0f 100644 --- a/net/netfilter/nfnetlink_hook.c +++ b/net/netfilter/nfnetlink_hook.c @@ -215,13 +215,6 @@ nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *de hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); #endif break; -#if IS_ENABLED(CONFIG_DECNET) - case NFPROTO_DECNET: - if (hook >= ARRAY_SIZE(net->nf.hooks_decnet)) - return ERR_PTR(-EINVAL); - hook_head = rcu_dereference(net->nf.hooks_decnet[hook]); - break; -#endif #if defined(CONFIG_NETFILTER_INGRESS) || defined(CONFIG_NETFILTER_EGRESS) case NFPROTO_NETDEV: if (hook >= NF_NETDEV_NUMHOOKS) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index b04995c3e17f..a3f01f209a53 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -1089,9 +1089,6 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx, if (err < 0) goto err_put_helper; - /* Avoid the bogus warning, helper will be assigned after CT init */ - nf_ct_set_auto_assign_helper_warned(ctx->net); - return 0; err_put_helper: diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c index 0053a697c931..adacf95b6e2b 100644 --- a/net/netfilter/nft_osf.c +++ b/net/netfilter/nft_osf.c @@ -51,7 +51,7 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs, snprintf(os_match, NFT_OSF_MAXGENRELEN, "%s:%s", data.genre, data.version); else - strlcpy(os_match, data.genre, NFT_OSF_MAXGENRELEN); + strscpy(os_match, data.genre, NFT_OSF_MAXGENRELEN); strncpy((char *)dest, os_match, NFT_OSF_MAXGENRELEN); } @@ -115,9 +115,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) { - return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) | - (1 << NF_INET_PRE_ROUTING) | - (1 << NF_INET_FORWARD)); + unsigned int hooks; + + switch (ctx->family) { + case NFPROTO_IPV4: + case NFPROTO_IPV6: + case NFPROTO_INET: + hooks = (1 << NF_INET_LOCAL_IN) | + (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_FORWARD); + break; + default: + return -EOPNOTSUPP; + } + + return nft_chain_validate_hooks(ctx->chain, hooks); } static bool nft_osf_reduce(struct nft_regs_track *track, diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 2e7ac007cb30..088244f9d838 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -173,10 +173,10 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = { [NFTA_PAYLOAD_SREG] = { .type = NLA_U32 }, [NFTA_PAYLOAD_DREG] = { .type = NLA_U32 }, [NFTA_PAYLOAD_BASE] = { .type = NLA_U32 }, - [NFTA_PAYLOAD_OFFSET] = { .type = NLA_U32 }, - [NFTA_PAYLOAD_LEN] = { .type = NLA_U32 }, + [NFTA_PAYLOAD_OFFSET] = NLA_POLICY_MAX_BE(NLA_U32, 255), + [NFTA_PAYLOAD_LEN] = NLA_POLICY_MAX_BE(NLA_U32, 255), [NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 }, - [NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 }, + [NFTA_PAYLOAD_CSUM_OFFSET] = NLA_POLICY_MAX_BE(NLA_U32, 255), [NFTA_PAYLOAD_CSUM_FLAGS] = { .type = NLA_U32 }, }; @@ -740,17 +740,23 @@ static int nft_payload_set_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_payload_set *priv = nft_expr_priv(expr); + u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE; + int err; priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); priv->len = 
ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); if (tb[NFTA_PAYLOAD_CSUM_TYPE]) - priv->csum_type = - ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE])); - if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) - priv->csum_offset = - ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET])); + csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE])); + if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) { + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX, + &csum_offset); + if (err < 0) + return err; + + priv->csum_offset = csum_offset; + } if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) { u32 flags; @@ -761,7 +767,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx, priv->csum_flags = flags; } - switch (priv->csum_type) { + switch (csum_type) { case NFT_PAYLOAD_CSUM_NONE: case NFT_PAYLOAD_CSUM_INET: break; @@ -775,6 +781,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx, default: return -EOPNOTSUPP; } + priv->csum_type = csum_type; return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg, priv->len); @@ -833,6 +840,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx, { enum nft_payload_bases base; unsigned int offset, len; + int err; if (tb[NFTA_PAYLOAD_BASE] == NULL || tb[NFTA_PAYLOAD_OFFSET] == NULL || @@ -859,8 +867,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx, if (tb[NFTA_PAYLOAD_DREG] == NULL) return ERR_PTR(-EINVAL); - offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); - len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset); + if (err < 0) + return ERR_PTR(err); + + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len); + if (err < 0) + return ERR_PTR(err); if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER) diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c index 68b2eed742df..62da25ad264b 100644 --- a/net/netfilter/nft_tproxy.c +++ b/net/netfilter/nft_tproxy.c @@ -312,6 +312,13 @@ static int nft_tproxy_dump(struct sk_buff *skb, return 0; } +static int nft_tproxy_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING); +} + static struct nft_expr_type nft_tproxy_type; static const struct nft_expr_ops nft_tproxy_ops = { .type = &nft_tproxy_type, @@ -321,6 +328,7 @@ static const struct nft_expr_ops nft_tproxy_ops = { .destroy = nft_tproxy_destroy, .dump = nft_tproxy_dump, .reduce = NFT_REDUCE_READONLY, + .validate = nft_tproxy_validate, }; static struct nft_expr_type nft_tproxy_type __read_mostly = { diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 5edaaded706d..983ade4be3b3 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -161,6 +161,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = { static struct nft_expr_type nft_tunnel_type __read_mostly = { .name = "tunnel", + .family = NFPROTO_NETDEV, .ops = &nft_tunnel_get_ops, .policy = nft_tunnel_policy, .maxattr = NFTA_TUNNEL_MAX, diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 54a489f16b17..470282cf3fae 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -766,7 +766,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, msize += off; m->u.user.match_size = msize; - strlcpy(name, match->name, sizeof(name)); + strscpy(name, match->name, sizeof(name)); module_put(match->me); strncpy(m->u.user.name, name, sizeof(m->u.user.name)); @@ 
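The strlcpy() to strscpy() conversions repeated through this series are behaviour-preserving for these callers: both NUL-terminate and copy at most sizeof(dst) - 1 bytes, but strscpy() never reads past a short source and reports truncation as -E2BIG instead of returning the full source length. A small hedged sketch of checking that return value (not taken from the patched files):

static void example_copy_name(char *dst, size_t dst_len, const char *src)
{
	ssize_t n;

	n = strscpy(dst, src, dst_len);
	if (n == -E2BIG)
		pr_debug("name '%s' truncated to %zu bytes\n", src, dst_len - 1);
}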
-1146,7 +1146,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, tsize += off; t->u.user.target_size = tsize; - strlcpy(name, target->name, sizeof(name)); + strscpy(name, target->name, sizeof(name)); module_put(target->me); strncpy(t->u.user.name, name, sizeof(t->u.user.name)); @@ -1827,7 +1827,7 @@ int xt_proto_init(struct net *net, u_int8_t af) root_uid = make_kuid(net->user_ns, 0); root_gid = make_kgid(net->user_ns, 0); - strlcpy(buf, xt_prefix[af], sizeof(buf)); + strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TABLES, sizeof(buf)); proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops, sizeof(struct seq_net_private), @@ -1837,7 +1837,7 @@ int xt_proto_init(struct net *net, u_int8_t af) if (uid_valid(root_uid) && gid_valid(root_gid)) proc_set_user(proc, root_uid, root_gid); - strlcpy(buf, xt_prefix[af], sizeof(buf)); + strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_MATCHES, sizeof(buf)); proc = proc_create_seq_private(buf, 0440, net->proc_net, &xt_match_seq_ops, sizeof(struct nf_mttg_trav), @@ -1847,7 +1847,7 @@ int xt_proto_init(struct net *net, u_int8_t af) if (uid_valid(root_uid) && gid_valid(root_gid)) proc_set_user(proc, root_uid, root_gid); - strlcpy(buf, xt_prefix[af], sizeof(buf)); + strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TARGETS, sizeof(buf)); proc = proc_create_seq_private(buf, 0440, net->proc_net, &xt_target_seq_ops, sizeof(struct nf_mttg_trav), @@ -1862,12 +1862,12 @@ int xt_proto_init(struct net *net, u_int8_t af) #ifdef CONFIG_PROC_FS out_remove_matches: - strlcpy(buf, xt_prefix[af], sizeof(buf)); + strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_MATCHES, sizeof(buf)); remove_proc_entry(buf, net->proc_net); out_remove_tables: - strlcpy(buf, xt_prefix[af], sizeof(buf)); + strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TABLES, sizeof(buf)); remove_proc_entry(buf, net->proc_net); out: @@ -1881,15 +1881,15 @@ void xt_proto_fini(struct net *net, u_int8_t af) #ifdef CONFIG_PROC_FS char buf[XT_FUNCTION_MAXNAMELEN]; - strlcpy(buf, xt_prefix[af], sizeof(buf)); + strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TABLES, sizeof(buf)); remove_proc_entry(buf, net->proc_net); - strlcpy(buf, xt_prefix[af], sizeof(buf)); + strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TARGETS, sizeof(buf)); remove_proc_entry(buf, net->proc_net); - strlcpy(buf, xt_prefix[af], sizeof(buf)); + strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_MATCHES, sizeof(buf)); remove_proc_entry(buf, net->proc_net); #endif /*CONFIG_PROC_FS*/ diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index 8aec1b529364..80f6624e2355 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c @@ -144,7 +144,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) goto err1; gnet_stats_basic_sync_init(&est->bstats); - strlcpy(est->name, info->name, sizeof(est->name)); + strscpy(est->name, info->name, sizeof(est->name)); spin_lock_init(&est->lock); est->refcnt = 1; est->params.interval = info->interval; diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c index 91a19c3ea1a3..f1d5b8465217 100644 --- a/net/netlabel/netlabel_calipso.c +++ b/net/netlabel/netlabel_calipso.c @@ -344,6 +344,7 @@ static struct genl_family netlbl_calipso_gnl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = netlbl_calipso_ops, .n_small_ops = ARRAY_SIZE(netlbl_calipso_ops), + .resv_start_op = 
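Each generic netlink family touched in this series gains a .resv_start_op so the core can start rejecting requests whose genlmsghdr::reserved field is non-zero (see the genetlink.c hunk further down). A hedged sketch of the convention for a made-up family; only resv_start_op is the new field, the other members are placeholders:

static struct genl_family example_genl_family __ro_after_init = {
	.name		= "example",
	.version	= 1,
	.maxattr	= EXAMPLE_ATTR_MAX,		/* placeholder */
	.module		= THIS_MODULE,
	.small_ops	= example_small_ops,		/* placeholder */
	.n_small_ops	= ARRAY_SIZE(example_small_ops),
	/* every currently defined command ignores genlmsghdr::reserved,
	 * so strict checking of that field starts right after the last
	 * defined command */
	.resv_start_op	= EXAMPLE_CMD_LAST + 1,
};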
NLBL_CALIPSO_C_LISTALL + 1, }; /* NetLabel Generic NETLINK Protocol Functions diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index 894e6b8f1a86..fa08ee75ac06 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c @@ -767,6 +767,7 @@ static struct genl_family netlbl_cipsov4_gnl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = netlbl_cipsov4_ops, .n_small_ops = ARRAY_SIZE(netlbl_cipsov4_ops), + .resv_start_op = NLBL_CIPSOV4_C_LISTALL + 1, }; /* diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index 032b7d7b32c7..689eaa2afbec 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c @@ -826,6 +826,7 @@ static struct genl_family netlbl_mgmt_gnl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = netlbl_mgmt_genl_ops, .n_small_ops = ARRAY_SIZE(netlbl_mgmt_genl_ops), + .resv_start_op = NLBL_MGMT_C_VERSION + 1, }; /* diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 0555dffd80e0..9996883bf2b7 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1374,6 +1374,7 @@ static struct genl_family netlbl_unlabel_gnl_family __ro_after_init = { .module = THIS_MODULE, .small_ops = netlbl_unlabel_genl_ops, .n_small_ops = ARRAY_SIZE(netlbl_unlabel_genl_ops), + .resv_start_op = NLBL_UNLABEL_C_STATICLISTDEF + 1, }; /* diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 0cd91f813a3b..a662e8a5ff84 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2400,6 +2400,69 @@ error_free: } EXPORT_SYMBOL(__netlink_dump_start); +static size_t +netlink_ack_tlv_len(struct netlink_sock *nlk, int err, + const struct netlink_ext_ack *extack) +{ + size_t tlvlen; + + if (!extack || !(nlk->flags & NETLINK_F_EXT_ACK)) + return 0; + + tlvlen = 0; + if (extack->_msg) + tlvlen += nla_total_size(strlen(extack->_msg) + 1); + if (extack->cookie_len) + tlvlen += nla_total_size(extack->cookie_len); + + /* Following attributes are only reported as error (not warning) */ + if (!err) + return tlvlen; + + if (extack->bad_attr) + tlvlen += nla_total_size(sizeof(u32)); + if (extack->policy) + tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy); + if (extack->miss_type) + tlvlen += nla_total_size(sizeof(u32)); + if (extack->miss_nest) + tlvlen += nla_total_size(sizeof(u32)); + + return tlvlen; +} + +static void +netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb, + struct nlmsghdr *nlh, int err, + const struct netlink_ext_ack *extack) +{ + if (extack->_msg) + WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg)); + if (extack->cookie_len) + WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, + extack->cookie_len, extack->cookie)); + + if (!err) + return; + + if (extack->bad_attr && + !WARN_ON((u8 *)extack->bad_attr < in_skb->data || + (u8 *)extack->bad_attr >= in_skb->data + in_skb->len)) + WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, + (u8 *)extack->bad_attr - (u8 *)nlh)); + if (extack->policy) + netlink_policy_dump_write_attr(skb, extack->policy, + NLMSGERR_ATTR_POLICY); + if (extack->miss_type) + WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_TYPE, + extack->miss_type)); + if (extack->miss_nest && + !WARN_ON((u8 *)extack->miss_nest < in_skb->data || + (u8 *)extack->miss_nest > in_skb->data + in_skb->len)) + WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_NEST, + (u8 *)extack->miss_nest - (u8 *)nlh)); +} + void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, const 
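netlink_ack_tlv_len() and netlink_ack_tlv_fill() only package whatever the request handler put into struct netlink_ext_ack. A minimal hedged sketch of the producer side; EXAMPLE_ATTR_NAME and the handler are invented for illustration:

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	if (!info->attrs[EXAMPLE_ATTR_NAME]) {
		/* becomes NLMSGERR_ATTR_MSG in the extended ACK */
		NL_SET_ERR_MSG(info->extack, "missing name attribute");
		return -EINVAL;
	}
	return 0;
}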
struct netlink_ext_ack *extack) { @@ -2407,29 +2470,20 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, struct nlmsghdr *rep; struct nlmsgerr *errmsg; size_t payload = sizeof(*errmsg); - size_t tlvlen = 0; struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk); unsigned int flags = 0; - bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK; + size_t tlvlen; /* Error messages get the original request appened, unless the user * requests to cap the error message, and get extra error data if * requested. */ - if (nlk_has_extack && extack && extack->_msg) - tlvlen += nla_total_size(strlen(extack->_msg) + 1); - if (err && !(nlk->flags & NETLINK_F_CAP_ACK)) payload += nlmsg_len(nlh); else flags |= NLM_F_CAPPED; - if (err && nlk_has_extack && extack && extack->bad_attr) - tlvlen += nla_total_size(sizeof(u32)); - if (nlk_has_extack && extack && extack->cookie_len) - tlvlen += nla_total_size(extack->cookie_len); - if (err && nlk_has_extack && extack && extack->policy) - tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy); + tlvlen = netlink_ack_tlv_len(nlk, err, extack); if (tlvlen) flags |= NLM_F_ACK_TLVS; @@ -2440,31 +2494,16 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, return; } - rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, - NLMSG_ERROR, payload, flags); + rep = nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, + NLMSG_ERROR, payload, flags); errmsg = nlmsg_data(rep); errmsg->error = err; - memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh)); + unsafe_memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) + ? nlh->nlmsg_len : sizeof(*nlh), + /* Bounds checked by the skb layer. */); - if (nlk_has_extack && extack) { - if (extack->_msg) { - WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, - extack->_msg)); - } - if (err && extack->bad_attr && - !WARN_ON((u8 *)extack->bad_attr < in_skb->data || - (u8 *)extack->bad_attr >= in_skb->data + - in_skb->len)) - WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, - (u8 *)extack->bad_attr - - (u8 *)nlh)); - if (extack->cookie_len) - WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, - extack->cookie_len, extack->cookie)); - if (extack->policy) - netlink_policy_dump_write_attr(skb, extack->policy, - NLMSGERR_ATTR_POLICY); - } + if (tlvlen) + netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack); nlmsg_end(skb, rep); diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 1afca2a6c2ac..7c136de117eb 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -757,6 +757,9 @@ static int genl_family_rcv_msg(const struct genl_family *family, if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) return -EINVAL; + if (hdr->cmd >= family->resv_start_op && hdr->reserved) + return -EINVAL; + if (genl_get_cmd(hdr->cmd, family, &op)) return -EOPNOTSUPP; @@ -1174,13 +1177,17 @@ static int ctrl_dumppolicy_start(struct netlink_callback *cb) op.policy, op.maxattr); if (err) - return err; + goto err_free_state; } } if (!ctx->state) return -ENODATA; return 0; + +err_free_state: + netlink_policy_dump_free(ctx->state); + return err; } static void *ctrl_dumppolicy_prep(struct sk_buff *skb, @@ -1344,6 +1351,7 @@ static struct genl_family genl_ctrl __ro_after_init = { .module = THIS_MODULE, .ops = genl_ctrl_ops, .n_ops = ARRAY_SIZE(genl_ctrl_ops), + .resv_start_op = CTRL_CMD_GETPOLICY + 1, .mcgrps = genl_ctrl_groups, .n_mcgrps = ARRAY_SIZE(genl_ctrl_groups), .id = GENL_ID_CTRL, @@ -1358,7 +1366,7 @@ static int genl_bind(struct net *net, int group) unsigned 
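The ctrl_dumppolicy_start() fix is an instance of a general rule for dump ->start() callbacks: state accumulated across several add calls must be freed if a later step fails, otherwise it leaks because the dump that would free it never runs. A hedged sketch with hypothetical example_state_* helpers and an illustrative item count:

static int example_dump_start(struct netlink_callback *cb)
{
	struct example_state *state = NULL;	/* hypothetical type */
	unsigned int i;
	int err;

	for (i = 0; i < EXAMPLE_N_ITEMS; i++) {	/* placeholder constant */
		err = example_state_add(&state, i);	/* hypothetical */
		if (err)
			goto err_free_state;
	}

	cb->data = state;
	return 0;

err_free_state:
	example_state_free(state);		/* hypothetical */
	return err;
}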
int id; int ret = 0; - genl_lock_all(); + down_read(&cb_lock); idr_for_each_entry(&genl_fam_idr, family, id) { const struct genl_multicast_group *grp; @@ -1379,7 +1387,7 @@ static int genl_bind(struct net *net, int group) break; } - genl_unlock_all(); + up_read(&cb_lock); return ret; } diff --git a/net/netlink/policy.c b/net/netlink/policy.c index 8d7c900e27f4..87e3de0fde89 100644 --- a/net/netlink/policy.c +++ b/net/netlink/policy.c @@ -144,7 +144,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate, err = add_policy(&state, policy, maxtype); if (err) - return err; + goto err_try_undo; for (policy_idx = 0; policy_idx < state->n_alloc && state->policies[policy_idx].policy; @@ -164,7 +164,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate, policy[type].nested_policy, policy[type].len); if (err) - return err; + goto err_try_undo; break; default: break; @@ -174,6 +174,16 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate, *pstate = state; return 0; + +err_try_undo: + /* Try to preserve reasonable unwind semantics - if we're starting from + * scratch clean up fully, otherwise record what we got and caller will. + */ + if (!*pstate) + netlink_policy_dump_free(state); + else + *pstate = state; + return err; } static bool diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 7c62417ccfd7..9d91087b9399 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1783,6 +1783,7 @@ static struct genl_family nfc_genl_family __ro_after_init = { .module = THIS_MODULE, .ops = nfc_genl_ops, .n_ops = ARRAY_SIZE(nfc_genl_ops), + .resv_start_op = NFC_CMD_DEACTIVATE_TARGET + 1, .mcgrps = nfc_genl_mcgrps, .n_mcgrps = ARRAY_SIZE(nfc_genl_mcgrps), }; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 4e70df91d0f2..48e8f5c29b67 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -2283,6 +2283,7 @@ struct genl_family dp_ct_limit_genl_family __ro_after_init = { .parallel_ops = true, .small_ops = ct_limit_genl_ops, .n_small_ops = ARRAY_SIZE(ct_limit_genl_ops), + .resv_start_op = OVS_CT_LIMIT_CMD_GET + 1, .mcgrps = &ovs_ct_limit_multicast_group, .n_mcgrps = 1, .module = THIS_MODULE, diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 7e8a39a35627..c8a9075ddd0a 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -252,10 +252,17 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) upcall.mru = OVS_CB(skb)->mru; error = ovs_dp_upcall(dp, skb, key, &upcall, 0); - if (unlikely(error)) - kfree_skb(skb); - else + switch (error) { + case 0: + case -EAGAIN: + case -ERESTARTSYS: + case -EINTR: consume_skb(skb); + break; + default: + kfree_skb(skb); + break; + } stats_counter = &stats->n_missed; goto out; } @@ -551,8 +558,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, out: if (err) skb_tx_error(skb); - kfree_skb(user_skb); - kfree_skb(nskb); + consume_skb(user_skb); + consume_skb(nskb); + return err; } @@ -684,6 +692,7 @@ static struct genl_family dp_packet_genl_family __ro_after_init = { .parallel_ops = true, .small_ops = dp_packet_genl_ops, .n_small_ops = ARRAY_SIZE(dp_packet_genl_ops), + .resv_start_op = OVS_PACKET_CMD_EXECUTE + 1, .module = THIS_MODULE, }; @@ -1501,6 +1510,7 @@ static struct genl_family dp_flow_genl_family __ro_after_init = { .parallel_ops = true, .small_ops = dp_flow_genl_ops, .n_small_ops = ARRAY_SIZE(dp_flow_genl_ops), + .resv_start_op = OVS_FLOW_CMD_SET + 1, 
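The ovs_dp_process_packet() change leans on a convention that is easy to miss: kfree_skb() is accounted and traced as a packet drop (it feeds drop monitors and the kfree_skb tracepoint), while consume_skb() marks a normal end of life. Treating transient upcall errors such as -EAGAIN as consumption keeps drop monitors from flagging them. A compressed sketch of that decision:

static void example_finish_upcall(struct sk_buff *skb, int error)
{
	switch (error) {
	case 0:
	case -EAGAIN:
	case -ERESTARTSYS:
	case -EINTR:
		consume_skb(skb);	/* not a drop: queue full or interrupted */
		break;
	default:
		kfree_skb(skb);		/* real failure: visible to drop monitors */
		break;
	}
}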
.mcgrps = &ovs_dp_flow_multicast_group, .n_mcgrps = 1, .module = THIS_MODULE, @@ -1515,6 +1525,7 @@ static size_t ovs_dp_cmd_msg_size(void) msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats)); msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */ msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */ + msgsize += nla_total_size(sizeof(u32) * nr_cpu_ids); /* OVS_DP_ATTR_PER_CPU_PIDS */ return msgsize; } @@ -1526,7 +1537,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, struct ovs_header *ovs_header; struct ovs_dp_stats dp_stats; struct ovs_dp_megaflow_stats dp_megaflow_stats; - int err; + struct dp_nlsk_pids *pids = ovsl_dereference(dp->upcall_portids); + int err, pids_len; ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family, flags, cmd); @@ -1556,6 +1568,12 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, ovs_flow_tbl_masks_cache_size(&dp->table))) goto nla_put_failure; + if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU && pids) { + pids_len = min(pids->n_pids, nr_cpu_ids) * sizeof(u32); + if (nla_put(skb, OVS_DP_ATTR_PER_CPU_PIDS, pids_len, &pids->pids)) + goto nla_put_failure; + } + genlmsg_end(skb, ovs_header); return 0; @@ -1779,6 +1797,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) parms.dp = dp; parms.port_no = OVSP_LOCAL; parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID]; + parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX] + ? nla_get_u32(a[OVS_DP_ATTR_IFINDEX]) : 0; /* So far only local changes have been made, now need the lock. */ ovs_lock(); @@ -1802,7 +1822,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) ovs_dp_reset_user_features(skb, info); } - goto err_unlock_and_destroy_meters; + goto err_destroy_portids; } err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, @@ -1817,6 +1837,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) ovs_notify(&dp_datapath_genl_family, reply, info); return 0; +err_destroy_portids: + kfree(rcu_dereference_raw(dp->upcall_portids)); err_unlock_and_destroy_meters: ovs_unlock(); ovs_meters_exit(dp); @@ -1996,6 +2018,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 }, [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0, PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)), + [OVS_DP_ATTR_IFINDEX] = {.type = NLA_U32 }, }; static const struct genl_small_ops dp_datapath_genl_ops[] = { @@ -2032,6 +2055,7 @@ static struct genl_family dp_datapath_genl_family __ro_after_init = { .parallel_ops = true, .small_ops = dp_datapath_genl_ops, .n_small_ops = ARRAY_SIZE(dp_datapath_genl_ops), + .resv_start_op = OVS_DP_CMD_SET + 1, .mcgrps = &ovs_dp_datapath_multicast_group, .n_mcgrps = 1, .module = THIS_MODULE, @@ -2199,7 +2223,10 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] || !a[OVS_VPORT_ATTR_UPCALL_PID]) return -EINVAL; - if (a[OVS_VPORT_ATTR_IFINDEX]) + + parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]); + + if (a[OVS_VPORT_ATTR_IFINDEX] && parms.type != OVS_VPORT_TYPE_INTERNAL) return -EOPNOTSUPP; port_no = a[OVS_VPORT_ATTR_PORT_NO] @@ -2236,11 +2263,12 @@ restart: } parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]); - parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]); parms.options = a[OVS_VPORT_ATTR_OPTIONS]; parms.dp = dp; parms.port_no = port_no; parms.upcall_portids = 
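OVS_DP_ATTR_PER_CPU_PIDS is a variable-length array of u32s, so the message size estimate and the fill path have to agree on its length. A hedged sketch of that pairing; the example_* function names are illustrative:

static size_t example_pids_msg_size(unsigned int n_pids)
{
	/* reserve room for one flat u32 array attribute */
	return nla_total_size(n_pids * sizeof(u32));
}

static int example_put_pids(struct sk_buff *skb, const u32 *pids,
			    unsigned int n_pids)
{
	/* nla_put() copies the raw array; a receiver derives the count
	 * from nla_len() / sizeof(u32) */
	return nla_put(skb, OVS_DP_ATTR_PER_CPU_PIDS,
		       n_pids * sizeof(u32), pids);
}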
a[OVS_VPORT_ATTR_UPCALL_PID]; + parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX] + ? nla_get_u32(a[OVS_VPORT_ATTR_IFINDEX]) : 0; vport = new_vport(&parms); err = PTR_ERR(vport); diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 4c09cf8a0ab2..4a07ab094a84 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -3304,7 +3304,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, /* Disallow subsequent L2.5+ set actions and mpls_pop * actions once the last MPLS label in the packet is - * is popped as there is no check here to ensure that + * popped as there is no check here to ensure that * the new eth type is valid and thus set actions could * write off the end of the packet or otherwise corrupt * it. diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index 04a060ac7fdf..51111a9009bd 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -720,6 +720,7 @@ struct genl_family dp_meter_genl_family __ro_after_init = { .parallel_ops = true, .small_ops = dp_meter_genl_ops, .n_small_ops = ARRAY_SIZE(dp_meter_genl_ops), + .resv_start_op = OVS_METER_CMD_GET + 1, .mcgrps = &ovs_meter_multicast_group, .n_mcgrps = 1, .module = THIS_MODULE, diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 5b2ee9c1c00b..35f42c9821c2 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -65,7 +65,7 @@ static int internal_dev_stop(struct net_device *netdev) static void internal_dev_getinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, "openvswitch", sizeof(info->driver)); + strscpy(info->driver, "openvswitch", sizeof(info->driver)); } static const struct ethtool_ops internal_dev_ethtool_ops = { @@ -147,6 +147,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) } dev_net_set(vport->dev, ovs_dp_get_net(vport->dp)); + dev->ifindex = parms->desired_ifindex; internal_dev = internal_dev_priv(vport->dev); internal_dev->vport = vport; diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 9de5030d9801..7d276f60c000 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -90,12 +90,14 @@ struct vport { * @type: New vport's type. * @options: %OVS_VPORT_ATTR_OPTIONS attribute from Netlink message, %NULL if * none was supplied. + * @desired_ifindex: New vport's ifindex. * @dp: New vport's datapath. * @port_no: New vport's port number. */ struct vport_parms { const char *name; enum ovs_vport_type type; + int desired_ifindex; struct nlattr *options; /* For ovs_vport_alloc(). 
*/ diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5cbe07116e04..7de46d831349 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1905,7 +1905,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, */ spkt->spkt_family = dev->type; - strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); + strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); spkt->spkt_protocol = skb->protocol; /* @@ -3565,7 +3565,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); if (dev) - strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); + strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); return sizeof(*uaddr); diff --git a/net/psample/psample.c b/net/psample/psample.c index 118d5d2a81a0..81a794e36f53 100644 --- a/net/psample/psample.c +++ b/net/psample/psample.c @@ -115,6 +115,7 @@ static struct genl_family psample_nl_family __ro_after_init = { .mcgrps = psample_nl_mcgrps, .small_ops = psample_nl_ops, .n_small_ops = ARRAY_SIZE(psample_nl_ops), + .resv_start_op = PSAMPLE_CMD_GET_GROUP + 1, .n_mcgrps = ARRAY_SIZE(psample_nl_mcgrps), }; diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c index 18196e1c8c2f..9ced13c0627a 100644 --- a/net/qrtr/mhi.c +++ b/net/qrtr/mhi.c @@ -78,11 +78,6 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev, struct qrtr_mhi_dev *qdev; int rc; - /* start channels */ - rc = mhi_prepare_for_transfer_autoqueue(mhi_dev); - if (rc) - return rc; - qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL); if (!qdev) return -ENOMEM; @@ -96,6 +91,13 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev, if (rc) return rc; + /* start channels */ + rc = mhi_prepare_for_transfer_autoqueue(mhi_dev); + if (rc) { + qrtr_endpoint_unregister(&qdev->ep); + return rc; + } + dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n"); return 0; diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index b239120dd9ca..3ff6995244e5 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -894,7 +894,7 @@ module_exit(rds_exit); u32 rds_gen_num; -static int rds_init(void) +static int __init rds_init(void) { int ret; diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 6fdedd9dbbc2..cfbf0e129cba 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -363,6 +363,7 @@ static int acquire_refill(struct rds_connection *conn) static void release_refill(struct rds_connection *conn) { clear_bit(RDS_RECV_REFILL, &conn->c_flags); + smp_mb__after_atomic(); /* We don't use wait_on_bit()/wake_up_bit() because our waking is in a * hot path and finding waiters is very rare. 
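The one-line rds change (smp_mb__after_atomic() after clear_bit()) closes a classic lost-wakeup window: clear_bit() is not a memory barrier, so the waiter check could be ordered before the cleared bit becomes visible and a concurrent waiter could sleep forever. A generic hedged sketch of the pattern with illustrative names:

static unsigned long example_flags;
static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static void example_release(void)
{
	clear_bit(0, &example_flags);
	/* make the cleared bit visible before looking for waiters,
	 * pairing with the ordering on the sleeping side */
	smp_mb__after_atomic();
	if (waitqueue_active(&example_wq))
		wake_up_all(&example_wq);
}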
We don't want to walk diff --git a/net/rds/message.c b/net/rds/message.c index d74be4e3f3fa..44dbc612ef54 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -354,7 +354,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in for (i = 0; i < rm->data.op_nents; ++i) { sg_set_page(&rm->data.op_sg[i], - virt_to_page(page_addrs[i]), + virt_to_page((void *)page_addrs[i]), PAGE_SIZE, 0); } diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index a9e4ff948a7d..d36f3f6b4351 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -291,7 +291,7 @@ static void rds_rdma_listen_stop(void) #endif } -static int rds_rdma_init(void) +static int __init rds_rdma_init(void) { int ret; @@ -307,7 +307,7 @@ out: } module_init(rds_rdma_init); -static void rds_rdma_exit(void) +static void __exit rds_rdma_exit(void) { /* stop listening first to ensure no new connections are attempted */ rds_rdma_listen_stop(); diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 73ee2771093d..d8754366506a 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -712,7 +712,7 @@ static void rds_tcp_exit(void) } module_exit(rds_tcp_exit); -static int rds_tcp_init(void) +static int __init rds_tcp_init(void) { int ret; diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index 11c45c8c6c16..036d92c0ad79 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c @@ -96,7 +96,8 @@ static void rose_loopback_timer(struct timer_list *unused) } if (frametype == ROSE_CALL_REQUEST) { - if (!rose_loopback_neigh->dev) { + if (!rose_loopback_neigh->dev && + !rose_loopback_neigh->loopback) { kfree_skb(skb); continue; } diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 571436064cd6..1ad0ec5afb50 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -782,7 +782,6 @@ void rxrpc_delete_call_timer(struct rxrpc_call *call); */ extern const char *const rxrpc_call_states[]; extern const char *const rxrpc_call_completions[]; -extern unsigned int rxrpc_max_call_lifetime; extern struct kmem_cache *rxrpc_call_jar; struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); @@ -982,6 +981,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *); /* * peer_event.c */ +void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset); void rxrpc_error_report(struct sock *); void rxrpc_peer_keepalive_worker(struct work_struct *); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index f8ecad2b730e..2a93e7b5fbd0 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -166,7 +166,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); now = ktime_get_real(); - max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j)); + max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j)); spin_lock_bh(&call->lock); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 84d0a4109645..6401cdf7a624 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -285,8 +285,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, _enter("%p,%lx", rx, p->user_call_ID); limiter = rxrpc_get_call_slot(p, gfp); - if (!limiter) + if (!limiter) { + release_sock(&rx->sk); return ERR_PTR(-ERESTARTSYS); + } call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id); if (IS_ERR(call)) { diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 96ecb7356c0f..38ea98ff426b 100644 --- 
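The call_event.c hunk fixes a unit mismatch: ktime_sub() expects both operands as ktime_t (nanoseconds), but the second argument was a microsecond count, so resend ages were computed on mixed units. ktime_sub_us() takes the microsecond value explicitly. A hedged sketch:

static ktime_t example_max_age(unsigned long rto_j)
{
	ktime_t now = ktime_get_real();

	/* jiffies -> microseconds, then subtract as microseconds */
	return ktime_sub_us(now, jiffies_to_usecs(rto_j));
}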
a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -137,6 +137,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) tuncfg.encap_type = UDP_ENCAP_RXRPC; tuncfg.encap_rcv = rxrpc_input_packet; + tuncfg.encap_err_rcv = rxrpc_encap_err_rcv; tuncfg.sk_user_data = local; setup_udp_tunnel_sock(net, local->socket, &tuncfg); @@ -405,6 +406,9 @@ static void rxrpc_local_processor(struct work_struct *work) container_of(work, struct rxrpc_local, processor); bool again; + if (local->dead) + return; + trace_rxrpc_local(local->debug_id, rxrpc_local_processing, refcount_read(&local->ref), NULL); diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index be032850ae8c..32561e9567fe 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -16,22 +16,105 @@ #include <net/sock.h> #include <net/af_rxrpc.h> #include <net/ip.h> +#include <net/icmp.h> #include "ar-internal.h" +static void rxrpc_adjust_mtu(struct rxrpc_peer *, unsigned int); static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *); static void rxrpc_distribute_error(struct rxrpc_peer *, int, enum rxrpc_call_completion); /* - * Find the peer associated with an ICMP packet. + * Find the peer associated with an ICMPv4 packet. */ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, - const struct sk_buff *skb, + struct sk_buff *skb, + unsigned int udp_offset, + unsigned int *info, struct sockaddr_rxrpc *srx) { - struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); + struct iphdr *ip, *ip0 = ip_hdr(skb); + struct icmphdr *icmp = icmp_hdr(skb); + struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset); - _enter(""); + _enter("%u,%u,%u", ip0->protocol, icmp->type, icmp->code); + + switch (icmp->type) { + case ICMP_DEST_UNREACH: + *info = ntohs(icmp->un.frag.mtu); + fallthrough; + case ICMP_TIME_EXCEEDED: + case ICMP_PARAMETERPROB: + ip = (struct iphdr *)((void *)icmp + 8); + break; + default: + return NULL; + } + + memset(srx, 0, sizeof(*srx)); + srx->transport_type = local->srx.transport_type; + srx->transport_len = local->srx.transport_len; + srx->transport.family = local->srx.transport.family; + + /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice + * versa? + */ + switch (srx->transport.family) { + case AF_INET: + srx->transport_len = sizeof(srx->transport.sin); + srx->transport.family = AF_INET; + srx->transport.sin.sin_port = udp->dest; + memcpy(&srx->transport.sin.sin_addr, &ip->daddr, + sizeof(struct in_addr)); + break; + +#ifdef CONFIG_AF_RXRPC_IPV6 + case AF_INET6: + srx->transport_len = sizeof(srx->transport.sin); + srx->transport.family = AF_INET; + srx->transport.sin.sin_port = udp->dest; + memcpy(&srx->transport.sin.sin_addr, &ip->daddr, + sizeof(struct in_addr)); + break; +#endif + + default: + WARN_ON_ONCE(1); + return NULL; + } + + _net("ICMP {%pISp}", &srx->transport); + return rxrpc_lookup_peer_rcu(local, srx); +} + +#ifdef CONFIG_AF_RXRPC_IPV6 +/* + * Find the peer associated with an ICMPv6 packet. 
+ */ +static struct rxrpc_peer *rxrpc_lookup_peer_icmp6_rcu(struct rxrpc_local *local, + struct sk_buff *skb, + unsigned int udp_offset, + unsigned int *info, + struct sockaddr_rxrpc *srx) +{ + struct icmp6hdr *icmp = icmp6_hdr(skb); + struct ipv6hdr *ip, *ip0 = ipv6_hdr(skb); + struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset); + + _enter("%u,%u,%u", ip0->nexthdr, icmp->icmp6_type, icmp->icmp6_code); + + switch (icmp->icmp6_type) { + case ICMPV6_DEST_UNREACH: + *info = ntohl(icmp->icmp6_mtu); + fallthrough; + case ICMPV6_PKT_TOOBIG: + case ICMPV6_TIME_EXCEED: + case ICMPV6_PARAMPROB: + ip = (struct ipv6hdr *)((void *)icmp + 8); + break; + default: + return NULL; + } memset(srx, 0, sizeof(*srx)); srx->transport_type = local->srx.transport_type; @@ -43,6 +126,165 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, */ switch (srx->transport.family) { case AF_INET: + _net("Rx ICMP6 on v4 sock"); + srx->transport_len = sizeof(srx->transport.sin); + srx->transport.family = AF_INET; + srx->transport.sin.sin_port = udp->dest; + memcpy(&srx->transport.sin.sin_addr, + &ip->daddr.s6_addr32[3], sizeof(struct in_addr)); + break; + case AF_INET6: + _net("Rx ICMP6"); + srx->transport.sin.sin_port = udp->dest; + memcpy(&srx->transport.sin6.sin6_addr, &ip->daddr, + sizeof(struct in6_addr)); + break; + default: + WARN_ON_ONCE(1); + return NULL; + } + + _net("ICMP {%pISp}", &srx->transport); + return rxrpc_lookup_peer_rcu(local, srx); +} +#endif /* CONFIG_AF_RXRPC_IPV6 */ + +/* + * Handle an error received on the local endpoint as a tunnel. + */ +void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, + unsigned int udp_offset) +{ + struct sock_extended_err ee; + struct sockaddr_rxrpc srx; + struct rxrpc_local *local; + struct rxrpc_peer *peer; + unsigned int info = 0; + int err; + u8 version = ip_hdr(skb)->version; + u8 type = icmp_hdr(skb)->type; + u8 code = icmp_hdr(skb)->code; + + rcu_read_lock(); + local = rcu_dereference_sk_user_data(sk); + if (unlikely(!local)) { + rcu_read_unlock(); + return; + } + + rxrpc_new_skb(skb, rxrpc_skb_received); + + switch (ip_hdr(skb)->version) { + case IPVERSION: + peer = rxrpc_lookup_peer_icmp_rcu(local, skb, udp_offset, + &info, &srx); + break; +#ifdef CONFIG_AF_RXRPC_IPV6 + case 6: + peer = rxrpc_lookup_peer_icmp6_rcu(local, skb, udp_offset, + &info, &srx); + break; +#endif + default: + rcu_read_unlock(); + return; + } + + if (peer && !rxrpc_get_peer_maybe(peer)) + peer = NULL; + if (!peer) { + rcu_read_unlock(); + return; + } + + memset(&ee, 0, sizeof(ee)); + + switch (version) { + case IPVERSION: + switch (type) { + case ICMP_DEST_UNREACH: + switch (code) { + case ICMP_FRAG_NEEDED: + rxrpc_adjust_mtu(peer, info); + rcu_read_unlock(); + rxrpc_put_peer(peer); + return; + default: + break; + } + + err = EHOSTUNREACH; + if (code <= NR_ICMP_UNREACH) { + /* Might want to do something different with + * non-fatal errors + */ + //harderr = icmp_err_convert[code].fatal; + err = icmp_err_convert[code].errno; + } + break; + + case ICMP_TIME_EXCEEDED: + err = EHOSTUNREACH; + break; + default: + err = EPROTO; + break; + } + + ee.ee_origin = SO_EE_ORIGIN_ICMP; + ee.ee_type = type; + ee.ee_code = code; + ee.ee_errno = err; + break; + +#ifdef CONFIG_AF_RXRPC_IPV6 + case 6: + switch (type) { + case ICMPV6_PKT_TOOBIG: + rxrpc_adjust_mtu(peer, info); + rcu_read_unlock(); + rxrpc_put_peer(peer); + return; + } + + icmpv6_err_convert(type, code, &err); + + if (err == EACCES) + err = EHOSTUNREACH; + + ee.ee_origin = SO_EE_ORIGIN_ICMP6; + 
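rxrpc_encap_err_rcv() follows the usual RCU lookup discipline: a peer found under rcu_read_lock() may only be used after winning a reference, and rxrpc_get_peer_maybe() fails once the peer is already being torn down. A condensed, hedged sketch of that shape, with error handling trimmed:

static void example_handle_icmp(struct rxrpc_local *local,
				struct sockaddr_rxrpc *srx)
{
	struct rxrpc_peer *peer;

	rcu_read_lock();
	peer = rxrpc_lookup_peer_rcu(local, srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;		/* lost the race with peer teardown */
	rcu_read_unlock();

	if (!peer)
		return;

	/* ... record the error against the peer ... */

	rxrpc_put_peer(peer);
}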
ee.ee_type = type; + ee.ee_code = code; + ee.ee_errno = err; + break; +#endif + } + + trace_rxrpc_rx_icmp(peer, &ee, &srx); + + rxrpc_distribute_error(peer, err, RXRPC_CALL_NETWORK_ERROR); + rcu_read_unlock(); + rxrpc_put_peer(peer); +} + +/* + * Find the peer associated with a local error. + */ +static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local, + const struct sk_buff *skb, + struct sockaddr_rxrpc *srx) +{ + struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); + + _enter(""); + + memset(srx, 0, sizeof(*srx)); + srx->transport_type = local->srx.transport_type; + srx->transport_len = local->srx.transport_len; + srx->transport.family = local->srx.transport.family; + + switch (srx->transport.family) { + case AF_INET: srx->transport_len = sizeof(srx->transport.sin); srx->transport.family = AF_INET; srx->transport.sin.sin_port = serr->port; @@ -104,10 +346,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, /* * Handle an MTU/fragmentation problem. */ -static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr) +static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu) { - u32 mtu = serr->ee.ee_info; - _net("Rx ICMP Fragmentation Needed (%d)", mtu); /* wind down the local interface MTU */ @@ -148,7 +388,7 @@ void rxrpc_error_report(struct sock *sk) struct sock_exterr_skb *serr; struct sockaddr_rxrpc srx; struct rxrpc_local *local; - struct rxrpc_peer *peer; + struct rxrpc_peer *peer = NULL; struct sk_buff *skb; rcu_read_lock(); @@ -172,41 +412,20 @@ void rxrpc_error_report(struct sock *sk) } rxrpc_new_skb(skb, rxrpc_skb_received); serr = SKB_EXT_ERR(skb); - if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { - _leave("UDP empty message"); - rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_freed); - return; - } - peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx); - if (peer && !rxrpc_get_peer_maybe(peer)) - peer = NULL; - if (!peer) { - rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_freed); - _leave(" [no peer]"); - return; - } - - trace_rxrpc_rx_icmp(peer, &serr->ee, &srx); - - if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP && - serr->ee.ee_type == ICMP_DEST_UNREACH && - serr->ee.ee_code == ICMP_FRAG_NEEDED)) { - rxrpc_adjust_mtu(peer, serr); - rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_freed); - rxrpc_put_peer(peer); - _leave(" [MTU update]"); - return; + if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) { + peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx); + if (peer && !rxrpc_get_peer_maybe(peer)) + peer = NULL; + if (peer) { + trace_rxrpc_rx_icmp(peer, &serr->ee, &srx); + rxrpc_store_error(peer, serr); + } } - rxrpc_store_error(peer, serr); rcu_read_unlock(); rxrpc_free_skb(skb, rxrpc_skb_freed); rxrpc_put_peer(peer); - _leave(""); } diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 250f23bc1c07..7e39c262fd79 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -771,46 +771,3 @@ call_complete: goto out; } EXPORT_SYMBOL(rxrpc_kernel_recv_data); - -/** - * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet - * @sock: The socket that the call exists on - * @call: The call to query - * @_ts: Where to put the timestamp - * - * Retrieve the timestamp from the first DATA packet of the reply if it is - * in the ring. Returns true if successful, false if not. 
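With ICMP and ICMPv6 errors now delivered through the encap_err_rcv hook, rxrpc_error_report() only needs to care about locally generated errors queued on the UDP socket. A hedged sketch of that residual path; example_store_local_error is hypothetical:

static void example_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;

	skb = sock_dequeue_err_skb(sk);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
		example_store_local_error(sk, serr);	/* hypothetical */

	kfree_skb(skb);
}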
- */ -bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call, - ktime_t *_ts) -{ - struct sk_buff *skb; - rxrpc_seq_t hard_ack, top, seq; - bool success = false; - - mutex_lock(&call->user_mutex); - - if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY) - goto out; - - hard_ack = call->rx_hard_ack; - if (hard_ack != 0) - goto out; - - seq = hard_ack + 1; - top = smp_load_acquire(&call->rx_top); - if (after(seq, top)) - goto out; - - skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK]; - if (!skb) - goto out; - - *_ts = skb_get_ktime(skb); - success = true; - -out: - mutex_unlock(&call->user_mutex); - return success; -} -EXPORT_SYMBOL(rxrpc_kernel_get_reply_time); diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 258917a714c8..78fa0524156f 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -540,7 +540,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, * directly into the target buffer. */ sg = _sg; - nsg = skb_shinfo(skb)->nr_frags; + nsg = skb_shinfo(skb)->nr_frags + 1; if (nsg <= 4) { nsg = 4; } else { diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 1d38e279e2ef..3c3a626459de 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -51,10 +51,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx, return sock_intr_errno(*timeo); trace_rxrpc_transmit(call, rxrpc_transmit_wait); - mutex_unlock(&call->user_mutex); *timeo = schedule_timeout(*timeo); - if (mutex_lock_interruptible(&call->user_mutex) < 0) - return sock_intr_errno(*timeo); } } @@ -290,37 +287,48 @@ out: static int rxrpc_send_data(struct rxrpc_sock *rx, struct rxrpc_call *call, struct msghdr *msg, size_t len, - rxrpc_notify_end_tx_t notify_end_tx) + rxrpc_notify_end_tx_t notify_end_tx, + bool *_dropped_lock) { struct rxrpc_skb_priv *sp; struct sk_buff *skb; struct sock *sk = &rx->sk; + enum rxrpc_call_state state; long timeo; - bool more; - int ret, copied; + bool more = msg->msg_flags & MSG_MORE; + int ret, copied = 0; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); /* this should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); +reload: + ret = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) - return -EPIPE; - - more = msg->msg_flags & MSG_MORE; - + goto maybe_error; + state = READ_ONCE(call->state); + ret = -ESHUTDOWN; + if (state >= RXRPC_CALL_COMPLETE) + goto maybe_error; + ret = -EPROTO; + if (state != RXRPC_CALL_CLIENT_SEND_REQUEST && + state != RXRPC_CALL_SERVER_ACK_REQUEST && + state != RXRPC_CALL_SERVER_SEND_REPLY) + goto maybe_error; + + ret = -EMSGSIZE; if (call->tx_total_len != -1) { - if (len > call->tx_total_len) - return -EMSGSIZE; - if (!more && len != call->tx_total_len) - return -EMSGSIZE; + if (len - copied > call->tx_total_len) + goto maybe_error; + if (!more && len - copied != call->tx_total_len) + goto maybe_error; } skb = call->tx_pending; call->tx_pending = NULL; rxrpc_see_skb(skb, rxrpc_skb_seen); - copied = 0; do { /* Check to see if there's a ping ACK to reply to. */ if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE) @@ -331,16 +339,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, _debug("alloc"); - if (!rxrpc_check_tx_space(call, NULL)) { - ret = -EAGAIN; - if (msg->msg_flags & MSG_DONTWAIT) - goto maybe_error; - ret = rxrpc_wait_for_tx_window(rx, call, - &timeo, - msg->msg_flags & MSG_WAITALL); - if (ret < 0) - goto maybe_error; - } + if (!rxrpc_check_tx_space(call, NULL)) + goto wait_for_space; /* Work out the maximum size of a packet. 
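The rxkad change (nr_frags + 1) corrects how many scatterlist entries an skb can need: one for the linear head plus one per page fragment. A hedged sketch of sizing the array before skb_to_sgvec():

static int example_map_skb(struct sk_buff *skb, unsigned int offset,
			   unsigned int len)
{
	unsigned int nsg = skb_shinfo(skb)->nr_frags + 1;	/* head + frags */
	struct scatterlist *sg;
	int ret;

	sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nsg);
	/* skb_to_sgvec() writes one entry per segment; an undersized
	 * array (the old nr_frags count) would be overrun here */
	ret = skb_to_sgvec(skb, sg, offset, len);

	kfree(sg);
	return ret < 0 ? ret : 0;
}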
Assume that * the security header is going to be in the padded @@ -468,6 +468,27 @@ maybe_error: efault: ret = -EFAULT; goto out; + +wait_for_space: + ret = -EAGAIN; + if (msg->msg_flags & MSG_DONTWAIT) + goto maybe_error; + mutex_unlock(&call->user_mutex); + *_dropped_lock = true; + ret = rxrpc_wait_for_tx_window(rx, call, &timeo, + msg->msg_flags & MSG_WAITALL); + if (ret < 0) + goto maybe_error; + if (call->interruptibility == RXRPC_INTERRUPTIBLE) { + if (mutex_lock_interruptible(&call->user_mutex) < 0) { + ret = sock_intr_errno(timeo); + goto maybe_error; + } + } else { + mutex_lock(&call->user_mutex); + } + *_dropped_lock = false; + goto reload; } /* @@ -629,6 +650,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) enum rxrpc_call_state state; struct rxrpc_call *call; unsigned long now, j; + bool dropped_lock = false; int ret; struct rxrpc_send_params p = { @@ -737,21 +759,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ret = rxrpc_send_abort_packet(call); } else if (p.command != RXRPC_CMD_SEND_DATA) { ret = -EINVAL; - } else if (rxrpc_is_client_call(call) && - state != RXRPC_CALL_CLIENT_SEND_REQUEST) { - /* request phase complete for this client call */ - ret = -EPROTO; - } else if (rxrpc_is_service_call(call) && - state != RXRPC_CALL_SERVER_ACK_REQUEST && - state != RXRPC_CALL_SERVER_SEND_REPLY) { - /* Reply phase not begun or not complete for service call. */ - ret = -EPROTO; } else { - ret = rxrpc_send_data(rx, call, msg, len, NULL); + ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock); } out_put_unlock: - mutex_unlock(&call->user_mutex); + if (!dropped_lock) + mutex_unlock(&call->user_mutex); error_put: rxrpc_put_call(call, rxrpc_call_put); _leave(" = %d", ret); @@ -779,6 +793,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, struct msghdr *msg, size_t len, rxrpc_notify_end_tx_t notify_end_tx) { + bool dropped_lock = false; int ret; _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]); @@ -796,7 +811,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, case RXRPC_CALL_SERVER_ACK_REQUEST: case RXRPC_CALL_SERVER_SEND_REPLY: ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len, - notify_end_tx); + notify_end_tx, &dropped_lock); break; case RXRPC_CALL_COMPLETE: read_lock_bh(&call->state_lock); @@ -810,7 +825,8 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, break; } - mutex_unlock(&call->user_mutex); + if (!dropped_lock) + mutex_unlock(&call->user_mutex); _leave(" = %d", ret); return ret; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 817065aa2833..9b31a10cc639 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -676,6 +676,31 @@ int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) } EXPORT_SYMBOL(tcf_idr_search); +static int __tcf_generic_walker(struct net *net, struct sk_buff *skb, + struct netlink_callback *cb, int type, + const struct tc_action_ops *ops, + struct netlink_ext_ack *extack) +{ + struct tc_action_net *tn = net_generic(net, ops->net_id); + + if (unlikely(ops->walk)) + return ops->walk(net, skb, cb, type, ops, extack); + + return tcf_generic_walker(tn, skb, cb, type, ops, extack); +} + +static int __tcf_idr_search(struct net *net, + const struct tc_action_ops *ops, + struct tc_action **a, u32 index) +{ + struct tc_action_net *tn = net_generic(net, ops->net_id); + + if (unlikely(ops->lookup)) + return ops->lookup(net, a, index); + + return 
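The sendmsg rework turns "sleep for transmit window space while holding the call's user_mutex" into "drop the mutex, sleep, relock, re-validate". The re-validation matters because the call can complete or change state while unlocked, and the caller must be told when the lock was dropped. A generic hedged sketch with hypothetical helpers:

static int example_send(struct mutex *lock, bool *dropped_lock)
{
	int ret;

reload:
	ret = example_check_state();		/* hypothetical: runs under lock */
	if (ret < 0)
		return ret;

	if (!example_have_space()) {		/* hypothetical */
		mutex_unlock(lock);
		*dropped_lock = true;
		ret = example_wait_for_space();	/* sleeps without the lock */
		if (ret < 0)
			return ret;		/* caller knows the lock is gone */
		mutex_lock(lock);
		*dropped_lock = false;
		goto reload;			/* state may have moved on */
	}

	/* ... queue the next chunk of data ... */
	return 0;
}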
tcf_idr_search(tn, a, index); +} + static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index) { struct tc_action *p; @@ -926,7 +951,7 @@ int tcf_register_action(struct tc_action_ops *act, struct tc_action_ops *a; int ret; - if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup) + if (!act->act || !act->dump || !act->init) return -EINVAL; /* We have to register pernet ops before making the action ops visible, @@ -1638,7 +1663,7 @@ static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla, goto err_out; } err = -ENOENT; - if (ops->lookup(net, &a, index) == 0) { + if (__tcf_idr_search(net, ops, &a, index) == 0) { NL_SET_ERR_MSG(extack, "TC action with specified index not found"); goto err_mod; } @@ -1703,7 +1728,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, goto out_module_put; } - err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack); + err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack); if (err <= 0) { nla_nest_cancel(skb, nest); goto out_module_put; @@ -2121,7 +2146,7 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) if (nest == NULL) goto out_module_put; - ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL); + ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL); if (ret < 0) goto out_module_put; diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index fea2d78b9ddc..c5dbb68e6b78 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -29,7 +29,6 @@ struct tcf_bpf_cfg { bool is_ebpf; }; -static unsigned int bpf_net_id; static struct tc_action_ops act_bpf_ops; static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act, @@ -280,7 +279,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, bpf_net_id); + struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; struct tcf_chain *goto_ch = NULL; @@ -390,23 +389,6 @@ static void tcf_bpf_cleanup(struct tc_action *act) tcf_bpf_cfg_cleanup(&tmp); } -static int tcf_bpf_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, bpf_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, bpf_net_id); - - return tcf_idr_search(tn, a, index); -} - static struct tc_action_ops act_bpf_ops __read_mostly = { .kind = "bpf", .id = TCA_ID_BPF, @@ -415,27 +397,25 @@ static struct tc_action_ops act_bpf_ops __read_mostly = { .dump = tcf_bpf_dump, .cleanup = tcf_bpf_cleanup, .init = tcf_bpf_init, - .walk = tcf_bpf_walker, - .lookup = tcf_bpf_search, .size = sizeof(struct tcf_bpf), }; static __net_init int bpf_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, bpf_net_id); + struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id); return tc_action_net_init(net, tn, &act_bpf_ops); } static void __net_exit bpf_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, bpf_net_id); + tc_action_net_exit(net_list, act_bpf_ops.net_id); } static struct pernet_operations bpf_net_ops = { .init = bpf_init_net, .exit_batch = bpf_exit_net, - .id = &bpf_net_id, + .id = 
&act_bpf_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 09e2aafc8943..66b143bb04ac 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -25,7 +25,6 @@ #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_zones.h> -static unsigned int connmark_net_id; static struct tc_action_ops act_connmark_ops; static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a, @@ -99,7 +98,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, connmark_net_id); + struct tc_action_net *tn = net_generic(net, act_connmark_ops.net_id); struct nlattr *tb[TCA_CONNMARK_MAX + 1]; bool bind = flags & TCA_ACT_FLAGS_BIND; struct tcf_chain *goto_ch = NULL; @@ -200,23 +199,6 @@ nla_put_failure: return -1; } -static int tcf_connmark_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, connmark_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, connmark_net_id); - - return tcf_idr_search(tn, a, index); -} - static struct tc_action_ops act_connmark_ops = { .kind = "connmark", .id = TCA_ID_CONNMARK, @@ -224,27 +206,25 @@ static struct tc_action_ops act_connmark_ops = { .act = tcf_connmark_act, .dump = tcf_connmark_dump, .init = tcf_connmark_init, - .walk = tcf_connmark_walker, - .lookup = tcf_connmark_search, .size = sizeof(struct tcf_connmark_info), }; static __net_init int connmark_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, connmark_net_id); + struct tc_action_net *tn = net_generic(net, act_connmark_ops.net_id); return tc_action_net_init(net, tn, &act_connmark_ops); } static void __net_exit connmark_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, connmark_net_id); + tc_action_net_exit(net_list, act_connmark_ops.net_id); } static struct pernet_operations connmark_net_ops = { .init = connmark_init_net, .exit_batch = connmark_exit_net, - .id = &connmark_net_id, + .id = &act_connmark_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 22847ee009ef..1366adf9b909 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -37,7 +37,6 @@ static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = { [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), }, }; -static unsigned int csum_net_id; static struct tc_action_ops act_csum_ops; static int tcf_csum_init(struct net *net, struct nlattr *nla, @@ -45,7 +44,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, csum_net_id); + struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct tcf_csum_params *params_new; struct nlattr *tb[TCA_CSUM_MAX + 1]; @@ -673,23 +672,6 @@ static void tcf_csum_cleanup(struct tc_action *a) kfree_rcu(params, rcu); } -static int tcf_csum_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - 
struct tc_action_net *tn = net_generic(net, csum_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, csum_net_id); - - return tcf_idr_search(tn, a, index); -} - static size_t tcf_csum_get_fill_size(const struct tc_action *act) { return nla_total_size(sizeof(struct tc_csum)); @@ -722,8 +704,6 @@ static struct tc_action_ops act_csum_ops = { .dump = tcf_csum_dump, .init = tcf_csum_init, .cleanup = tcf_csum_cleanup, - .walk = tcf_csum_walker, - .lookup = tcf_csum_search, .get_fill_size = tcf_csum_get_fill_size, .offload_act_setup = tcf_csum_offload_act_setup, .size = sizeof(struct tcf_csum), @@ -731,20 +711,20 @@ static struct tc_action_ops act_csum_ops = { static __net_init int csum_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, csum_net_id); + struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id); return tc_action_net_init(net, tn, &act_csum_ops); } static void __net_exit csum_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, csum_net_id); + tc_action_net_exit(net_list, act_csum_ops.net_id); } static struct pernet_operations csum_net_ops = { .init = csum_init_net, .exit_batch = csum_exit_net, - .id = &csum_net_id, + .id = &act_csum_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index d55afb8d14be..4dd3fac42504 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -649,7 +649,6 @@ static void tcf_ct_flow_tables_uninit(void) } static struct tc_action_ops act_ct_ops; -static unsigned int ct_net_id; struct tc_ct_action_net { struct tc_action_net tn; /* Must be first */ @@ -1255,7 +1254,7 @@ static int tcf_ct_fill_params(struct net *net, struct nlattr **tb, struct netlink_ext_ack *extack) { - struct tc_ct_action_net *tn = net_generic(net, ct_net_id); + struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id); struct nf_conntrack_zone zone; struct nf_conn *tmpl; int err; @@ -1330,7 +1329,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, ct_net_id); + struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct tcf_ct_params *params = NULL; struct nlattr *tb[TCA_CT_MAX + 1]; @@ -1558,23 +1557,6 @@ nla_put_failure: return -1; } -static int tcf_ct_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, ct_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, ct_net_id); - - return tcf_idr_search(tn, a, index); -} - static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { @@ -1613,8 +1595,6 @@ static struct tc_action_ops act_ct_ops = { .dump = tcf_ct_dump, .init = tcf_ct_init, .cleanup = tcf_ct_cleanup, - .walk = tcf_ct_walker, - .lookup = tcf_ct_search, .stats_update = tcf_stats_update, .offload_act_setup = tcf_ct_offload_act_setup, .size = sizeof(struct tcf_ct), @@ -1623,7 +1603,7 @@ static struct tc_action_ops act_ct_ops = { static __net_init int ct_init_net(struct net *net) { unsigned int n_bits = 
sizeof_field(struct tcf_ct_params, labels) * 8; - struct tc_ct_action_net *tn = net_generic(net, ct_net_id); + struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id); if (nf_connlabels_get(net, n_bits - 1)) { tn->labels = false; @@ -1641,20 +1621,20 @@ static void __net_exit ct_exit_net(struct list_head *net_list) rtnl_lock(); list_for_each_entry(net, net_list, exit_list) { - struct tc_ct_action_net *tn = net_generic(net, ct_net_id); + struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id); if (tn->labels) nf_connlabels_put(net); } rtnl_unlock(); - tc_action_net_exit(net_list, ct_net_id); + tc_action_net_exit(net_list, act_ct_ops.net_id); } static struct pernet_operations ct_net_ops = { .init = ct_init_net, .exit_batch = ct_exit_net, - .id = &ct_net_id, + .id = &act_ct_ops.net_id, .size = sizeof(struct tc_ct_action_net), }; diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 0281e45987a4..d4102f0a9abd 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -25,7 +25,6 @@ #include <net/netfilter/nf_conntrack_zones.h> static struct tc_action_ops act_ctinfo_ops; -static unsigned int ctinfo_net_id; static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca, struct tcf_ctinfo_params *cp, @@ -157,7 +156,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, ctinfo_net_id); + struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; u32 dscpmask = 0, dscpstatemask, index; struct nlattr *tb[TCA_CTINFO_MAX + 1]; @@ -342,23 +341,6 @@ nla_put_failure: return -1; } -static int tcf_ctinfo_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, ctinfo_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_ctinfo_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, ctinfo_net_id); - - return tcf_idr_search(tn, a, index); -} - static void tcf_ctinfo_cleanup(struct tc_action *a) { struct tcf_ctinfo *ci = to_ctinfo(a); @@ -377,27 +359,25 @@ static struct tc_action_ops act_ctinfo_ops = { .dump = tcf_ctinfo_dump, .init = tcf_ctinfo_init, .cleanup= tcf_ctinfo_cleanup, - .walk = tcf_ctinfo_walker, - .lookup = tcf_ctinfo_search, .size = sizeof(struct tcf_ctinfo), }; static __net_init int ctinfo_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, ctinfo_net_id); + struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id); return tc_action_net_init(net, tn, &act_ctinfo_ops); } static void __net_exit ctinfo_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, ctinfo_net_id); + tc_action_net_exit(net_list, act_ctinfo_ops.net_id); } static struct pernet_operations ctinfo_net_ops = { .init = ctinfo_init_net, .exit_batch = ctinfo_exit_net, - .id = &ctinfo_net_id, + .id = &act_ctinfo_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index ac29d1065232..abe1bcc5c797 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -19,7 +19,6 @@ #include <linux/tc_act/tc_gact.h> #include <net/tc_act/tc_gact.h> -static unsigned int gact_net_id; static struct tc_action_ops act_gact_ops; #ifdef CONFIG_GACT_PROB @@ -55,7 +54,7 @@ static int 
tcf_gact_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, gact_net_id); + struct tc_action_net *tn = net_generic(net, act_gact_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_GACT_MAX + 1]; struct tcf_chain *goto_ch = NULL; @@ -222,23 +221,6 @@ nla_put_failure: return -1; } -static int tcf_gact_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, gact_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_gact_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, gact_net_id); - - return tcf_idr_search(tn, a, index); -} - static size_t tcf_gact_get_fill_size(const struct tc_action *act) { size_t sz = nla_total_size(sizeof(struct tc_gact)); /* TCA_GACT_PARMS */ @@ -308,8 +290,6 @@ static struct tc_action_ops act_gact_ops = { .stats_update = tcf_gact_stats_update, .dump = tcf_gact_dump, .init = tcf_gact_init, - .walk = tcf_gact_walker, - .lookup = tcf_gact_search, .get_fill_size = tcf_gact_get_fill_size, .offload_act_setup = tcf_gact_offload_act_setup, .size = sizeof(struct tcf_gact), @@ -317,20 +297,20 @@ static struct tc_action_ops act_gact_ops = { static __net_init int gact_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, gact_net_id); + struct tc_action_net *tn = net_generic(net, act_gact_ops.net_id); return tc_action_net_init(net, tn, &act_gact_ops); } static void __net_exit gact_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, gact_net_id); + tc_action_net_exit(net_list, act_gact_ops.net_id); } static struct pernet_operations gact_net_ops = { .init = gact_init_net, .exit_batch = gact_exit_net, - .id = &gact_net_id, + .id = &act_gact_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c index fd5155274733..3049878e7315 100644 --- a/net/sched/act_gate.c +++ b/net/sched/act_gate.c @@ -15,7 +15,6 @@ #include <net/pkt_cls.h> #include <net/tc_act/tc_gate.h> -static unsigned int gate_net_id; static struct tc_action_ops act_gate_ops; static ktime_t gate_get_time(struct tcf_gate *gact) @@ -298,7 +297,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, gate_net_id); + struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id); enum tk_offsets tk_offset = TK_OFFS_TAI; bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_GATE_MAX + 1]; @@ -565,16 +564,6 @@ nla_put_failure: return -1; } -static int tcf_gate_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, gate_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { @@ -585,13 +574,6 @@ static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets, tm->lastuse = max_t(u64, tm->lastuse, lastuse); } -static int tcf_gate_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, gate_net_id); - - 
return tcf_idr_search(tn, a, index); -} - static size_t tcf_gate_get_fill_size(const struct tc_action *act) { return nla_total_size(sizeof(struct tc_gate)); @@ -654,30 +636,28 @@ static struct tc_action_ops act_gate_ops = { .dump = tcf_gate_dump, .init = tcf_gate_init, .cleanup = tcf_gate_cleanup, - .walk = tcf_gate_walker, .stats_update = tcf_gate_stats_update, .get_fill_size = tcf_gate_get_fill_size, - .lookup = tcf_gate_search, .offload_act_setup = tcf_gate_offload_act_setup, .size = sizeof(struct tcf_gate), }; static __net_init int gate_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, gate_net_id); + struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id); return tc_action_net_init(net, tn, &act_gate_ops); } static void __net_exit gate_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, gate_net_id); + tc_action_net_exit(net_list, act_gate_ops.net_id); } static struct pernet_operations gate_net_ops = { .init = gate_init_net, .exit_batch = gate_exit_net, - .id = &gate_net_id, + .id = &act_gate_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 41ba55e60b1b..41d63b33461d 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -30,7 +30,6 @@ #include <linux/etherdevice.h> #include <net/ife.h> -static unsigned int ife_net_id; static int max_metacnt = IFE_META_MAX + 1; static struct tc_action_ops act_ife_ops; @@ -482,7 +481,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, ife_net_id); + struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_IFE_MAX + 1]; struct nlattr *tb2[IFE_META_MAX + 1]; @@ -878,23 +877,6 @@ static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a, return tcf_ife_decode(skb, a, res); } -static int tcf_ife_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, ife_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, ife_net_id); - - return tcf_idr_search(tn, a, index); -} - static struct tc_action_ops act_ife_ops = { .kind = "ife", .id = TCA_ID_IFE, @@ -903,27 +885,25 @@ static struct tc_action_ops act_ife_ops = { .dump = tcf_ife_dump, .cleanup = tcf_ife_cleanup, .init = tcf_ife_init, - .walk = tcf_ife_walker, - .lookup = tcf_ife_search, .size = sizeof(struct tcf_ife_info), }; static __net_init int ife_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, ife_net_id); + struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id); return tc_action_net_init(net, tn, &act_ife_ops); } static void __net_exit ife_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, ife_net_id); + tc_action_net_exit(net_list, act_ife_ops.net_id); } static struct pernet_operations ife_net_ops = { .init = ife_init_net, .exit_batch = ife_exit_net, - .id = &ife_net_id, + .id = &act_ife_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 2f3d507c24a1..1625e1037416 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -24,10 +24,7 @@ #include 
<linux/netfilter_ipv4/ip_tables.h> -static unsigned int ipt_net_id; static struct tc_action_ops act_ipt_ops; - -static unsigned int xt_net_id; static struct tc_action_ops act_xt_ops; static int ipt_init_target(struct net *net, struct xt_entry_target *t, @@ -206,8 +203,8 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, - tp, flags); + return __tcf_ipt_init(net, act_ipt_ops.net_id, nla, est, + a, &act_ipt_ops, tp, flags); } static int tcf_xt_init(struct net *net, struct nlattr *nla, @@ -215,8 +212,8 @@ static int tcf_xt_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, - tp, flags); + return __tcf_ipt_init(net, act_xt_ops.net_id, nla, est, + a, &act_xt_ops, tp, flags); } static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a, @@ -316,23 +313,6 @@ nla_put_failure: return -1; } -static int tcf_ipt_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, ipt_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, ipt_net_id); - - return tcf_idr_search(tn, a, index); -} - static struct tc_action_ops act_ipt_ops = { .kind = "ipt", .id = TCA_ID_IPT, @@ -341,47 +321,28 @@ static struct tc_action_ops act_ipt_ops = { .dump = tcf_ipt_dump, .cleanup = tcf_ipt_release, .init = tcf_ipt_init, - .walk = tcf_ipt_walker, - .lookup = tcf_ipt_search, .size = sizeof(struct tcf_ipt), }; static __net_init int ipt_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, ipt_net_id); + struct tc_action_net *tn = net_generic(net, act_ipt_ops.net_id); return tc_action_net_init(net, tn, &act_ipt_ops); } static void __net_exit ipt_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, ipt_net_id); + tc_action_net_exit(net_list, act_ipt_ops.net_id); } static struct pernet_operations ipt_net_ops = { .init = ipt_init_net, .exit_batch = ipt_exit_net, - .id = &ipt_net_id, + .id = &act_ipt_ops.net_id, .size = sizeof(struct tc_action_net), }; -static int tcf_xt_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, xt_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, xt_net_id); - - return tcf_idr_search(tn, a, index); -} - static struct tc_action_ops act_xt_ops = { .kind = "xt", .id = TCA_ID_XT, @@ -390,27 +351,25 @@ static struct tc_action_ops act_xt_ops = { .dump = tcf_ipt_dump, .cleanup = tcf_ipt_release, .init = tcf_xt_init, - .walk = tcf_xt_walker, - .lookup = tcf_xt_search, .size = sizeof(struct tcf_ipt), }; static __net_init int xt_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, xt_net_id); + struct tc_action_net *tn = net_generic(net, act_xt_ops.net_id); return tc_action_net_init(net, tn, &act_xt_ops); } static void __net_exit xt_exit_net(struct list_head *net_list) { - 
tc_action_net_exit(net_list, xt_net_id); + tc_action_net_exit(net_list, act_xt_ops.net_id); } static struct pernet_operations xt_net_ops = { .init = xt_init_net, .exit_batch = xt_exit_net, - .id = &xt_net_id, + .id = &act_xt_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index a1d70cf86843..f9c14d4188d4 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -86,7 +86,6 @@ static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = { [TCA_MIRRED_PARMS] = { .len = sizeof(struct tc_mirred) }, }; -static unsigned int mirred_net_id; static struct tc_action_ops act_mirred_ops; static int tcf_mirred_init(struct net *net, struct nlattr *nla, @@ -94,7 +93,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, mirred_net_id); + struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_MIRRED_MAX + 1]; struct tcf_chain *goto_ch = NULL; @@ -373,23 +372,6 @@ nla_put_failure: return -1; } -static int tcf_mirred_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, mirred_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, mirred_net_id); - - return tcf_idr_search(tn, a, index); -} - static int mirred_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -510,8 +492,6 @@ static struct tc_action_ops act_mirred_ops = { .dump = tcf_mirred_dump, .cleanup = tcf_mirred_release, .init = tcf_mirred_init, - .walk = tcf_mirred_walker, - .lookup = tcf_mirred_search, .get_fill_size = tcf_mirred_get_fill_size, .offload_act_setup = tcf_mirred_offload_act_setup, .size = sizeof(struct tcf_mirred), @@ -520,20 +500,20 @@ static struct tc_action_ops act_mirred_ops = { static __net_init int mirred_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, mirred_net_id); + struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id); return tc_action_net_init(net, tn, &act_mirred_ops); } static void __net_exit mirred_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, mirred_net_id); + tc_action_net_exit(net_list, act_mirred_ops.net_id); } static struct pernet_operations mirred_net_ops = { .init = mirred_init_net, .exit_batch = mirred_exit_net, - .id = &mirred_net_id, + .id = &act_mirred_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index adabeccb63e1..8ad25cc8ccd5 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -15,7 +15,6 @@ #include <net/pkt_cls.h> #include <net/tc_act/tc_mpls.h> -static unsigned int mpls_net_id; static struct tc_action_ops act_mpls_ops; #define ACT_MPLS_TTL_DEFAULT 255 @@ -155,7 +154,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, mpls_net_id); + struct tc_action_net *tn = net_generic(net, act_mpls_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_MPLS_MAX + 1]; struct tcf_chain *goto_ch = NULL; @@ -367,23 +366,6 @@ 
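
The recurring change in the act_* hunks above is that each action's pernet slot id moves from a file-local static (connmark_net_id, csum_net_id, ipt_net_id, ...) into its struct tc_action_ops, so generic code can reach the per-netns state through the ops pointer alone via net_generic(net, ops->net_id). A minimal standalone C sketch of that idea follows; it is not kernel code, and all names in it are invented for illustration.

/* Illustrative model only: an "ops" structure owns the per-netns slot id
 * that used to live in a separate static variable. */
#include <stdio.h>

#define MAX_PERNET_SLOTS 8

/* Stand-in for struct net: an array of per-subsystem private data. */
struct fake_net {
	void *gen[MAX_PERNET_SLOTS];
};

/* Stand-in for struct tc_action_ops with an embedded net_id. */
struct fake_action_ops {
	const char *kind;
	int net_id;		/* slot assigned at registration time */
};

static int next_slot;

/* Roughly what registering pernet_operations with .id pointing at
 * ops->net_id achieves: the slot number lands inside the ops. */
static int fake_register(struct fake_action_ops *ops)
{
	if (next_slot >= MAX_PERNET_SLOTS)
		return -1;
	ops->net_id = next_slot++;
	return 0;
}

/* Roughly net_generic(net, ops->net_id). */
static void *fake_net_generic(struct fake_net *net, int id)
{
	return net->gen[id];
}

int main(void)
{
	struct fake_net net = { 0 };
	struct fake_action_ops mirred = { .kind = "mirred" };
	struct fake_action_ops police = { .kind = "police" };
	int counters[2] = { 5, 9 };	/* pretend per-netns state */

	fake_register(&mirred);
	fake_register(&police);
	net.gen[mirred.net_id] = &counters[0];
	net.gen[police.net_id] = &counters[1];

	/* Generic code only needs the ops pointer, no per-module globals. */
	printf("%s state: %d\n", mirred.kind,
	       *(int *)fake_net_generic(&net, mirred.net_id));
	printf("%s state: %d\n", police.kind,
	       *(int *)fake_net_generic(&net, police.net_id));
	return 0;
}
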
nla_put_failure: return -EMSGSIZE; } -static int tcf_mpls_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, mpls_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, mpls_net_id); - - return tcf_idr_search(tn, a, index); -} - static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data, u32 *index_inc, bool bind, struct netlink_ext_ack *extack) @@ -451,28 +433,26 @@ static struct tc_action_ops act_mpls_ops = { .dump = tcf_mpls_dump, .init = tcf_mpls_init, .cleanup = tcf_mpls_cleanup, - .walk = tcf_mpls_walker, - .lookup = tcf_mpls_search, .offload_act_setup = tcf_mpls_offload_act_setup, .size = sizeof(struct tcf_mpls), }; static __net_init int mpls_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, mpls_net_id); + struct tc_action_net *tn = net_generic(net, act_mpls_ops.net_id); return tc_action_net_init(net, tn, &act_mpls_ops); } static void __net_exit mpls_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, mpls_net_id); + tc_action_net_exit(net_list, act_mpls_ops.net_id); } static struct pernet_operations mpls_net_ops = { .init = mpls_init_net, .exit_batch = mpls_exit_net, - .id = &mpls_net_id, + .id = &act_mpls_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 2a39b3729e84..9265145f1040 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -26,7 +26,6 @@ #include <net/udp.h> -static unsigned int nat_net_id; static struct tc_action_ops act_nat_ops; static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = { @@ -37,7 +36,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, nat_net_id); + struct tc_action_net *tn = net_generic(net, act_nat_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_NAT_MAX + 1]; struct tcf_chain *goto_ch = NULL; @@ -289,23 +288,6 @@ nla_put_failure: return -1; } -static int tcf_nat_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, nat_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, nat_net_id); - - return tcf_idr_search(tn, a, index); -} - static struct tc_action_ops act_nat_ops = { .kind = "nat", .id = TCA_ID_NAT, @@ -313,27 +295,25 @@ static struct tc_action_ops act_nat_ops = { .act = tcf_nat_act, .dump = tcf_nat_dump, .init = tcf_nat_init, - .walk = tcf_nat_walker, - .lookup = tcf_nat_search, .size = sizeof(struct tcf_nat), }; static __net_init int nat_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, nat_net_id); + struct tc_action_net *tn = net_generic(net, act_nat_ops.net_id); return tc_action_net_init(net, tn, &act_nat_ops); } static void __net_exit nat_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, nat_net_id); + tc_action_net_exit(net_list, act_nat_ops.net_id); } static struct 
pernet_operations nat_net_ops = { .init = nat_init_net, .exit_batch = nat_exit_net, - .id = &nat_net_id, + .id = &act_nat_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 823ee643371c..94ed5857ce67 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -21,7 +21,6 @@ #include <uapi/linux/tc_act/tc_pedit.h> #include <net/pkt_cls.h> -static unsigned int pedit_net_id; static struct tc_action_ops act_pedit_ops; static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = { @@ -139,7 +138,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, pedit_net_id); + struct tc_action_net *tn = net_generic(net, act_pedit_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_PEDIT_MAX + 1]; struct tcf_chain *goto_ch = NULL; @@ -492,23 +491,6 @@ nla_put_failure: return -1; } -static int tcf_pedit_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, pedit_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, pedit_net_id); - - return tcf_idr_search(tn, a, index); -} - static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data, u32 *index_inc, bool bind, struct netlink_ext_ack *extack) @@ -553,28 +535,26 @@ static struct tc_action_ops act_pedit_ops = { .dump = tcf_pedit_dump, .cleanup = tcf_pedit_cleanup, .init = tcf_pedit_init, - .walk = tcf_pedit_walker, - .lookup = tcf_pedit_search, .offload_act_setup = tcf_pedit_offload_act_setup, .size = sizeof(struct tcf_pedit), }; static __net_init int pedit_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, pedit_net_id); + struct tc_action_net *tn = net_generic(net, act_pedit_ops.net_id); return tc_action_net_init(net, tn, &act_pedit_ops); } static void __net_exit pedit_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, pedit_net_id); + tc_action_net_exit(net_list, act_pedit_ops.net_id); } static struct pernet_operations pedit_net_ops = { .init = pedit_init_net, .exit_batch = pedit_exit_net, - .id = &pedit_net_id, + .id = &act_pedit_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index b759628a47c2..0adb26e366a7 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -22,19 +22,8 @@ /* Each policer is serialized by its individual spinlock */ -static unsigned int police_net_id; static struct tc_action_ops act_police_ops; -static int tcf_police_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, police_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { [TCA_POLICE_RATE] = { .len = TC_RTAB_SIZE }, [TCA_POLICE_PEAKRATE] = { .len = TC_RTAB_SIZE }, @@ -58,7 +47,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, struct tc_police *parm; struct tcf_police *police; struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; - struct tc_action_net *tn = 
net_generic(net, police_net_id); + struct tc_action_net *tn = net_generic(net, act_police_ops.net_id); struct tcf_police_params *new; bool exists = false; u32 index; @@ -412,13 +401,6 @@ nla_put_failure: return -1; } -static int tcf_police_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, police_net_id); - - return tcf_idr_search(tn, a, index); -} - static int tcf_police_act_to_flow_act(int tc_act, u32 *extval, struct netlink_ext_ack *extack) { @@ -513,8 +495,6 @@ static struct tc_action_ops act_police_ops = { .act = tcf_police_act, .dump = tcf_police_dump, .init = tcf_police_init, - .walk = tcf_police_walker, - .lookup = tcf_police_search, .cleanup = tcf_police_cleanup, .offload_act_setup = tcf_police_offload_act_setup, .size = sizeof(struct tcf_police), @@ -522,20 +502,20 @@ static struct tc_action_ops act_police_ops = { static __net_init int police_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, police_net_id); + struct tc_action_net *tn = net_generic(net, act_police_ops.net_id); return tc_action_net_init(net, tn, &act_police_ops); } static void __net_exit police_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, police_net_id); + tc_action_net_exit(net_list, act_police_ops.net_id); } static struct pernet_operations police_net_ops = { .init = police_init_net, .exit_batch = police_exit_net, - .id = &police_net_id, + .id = &act_police_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 2f7f5e44d28c..5ba36f70e3a1 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -23,7 +23,6 @@ #include <linux/if_arp.h> -static unsigned int sample_net_id; static struct tc_action_ops act_sample_ops; static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = { @@ -38,7 +37,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, sample_net_id); + struct tc_action_net *tn = net_generic(net, act_sample_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_SAMPLE_MAX + 1]; struct psample_group *psample_group; @@ -241,23 +240,6 @@ nla_put_failure: return -1; } -static int tcf_sample_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, sample_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, sample_net_id); - - return tcf_idr_search(tn, a, index); -} - static void tcf_psample_group_put(void *priv) { struct psample_group *group = priv; @@ -321,8 +303,6 @@ static struct tc_action_ops act_sample_ops = { .dump = tcf_sample_dump, .init = tcf_sample_init, .cleanup = tcf_sample_cleanup, - .walk = tcf_sample_walker, - .lookup = tcf_sample_search, .get_psample_group = tcf_sample_get_group, .offload_act_setup = tcf_sample_offload_act_setup, .size = sizeof(struct tcf_sample), @@ -330,20 +310,20 @@ static struct tc_action_ops act_sample_ops = { static __net_init int sample_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, sample_net_id); + struct tc_action_net *tn = net_generic(net, act_sample_ops.net_id); return tc_action_net_init(net, tn, &act_sample_ops); } static 
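
The deleted tcf_*_walker()/tcf_*_search() wrappers above were identical apart from the pernet id they passed to tcf_generic_walker()/tcf_idr_search(); once the id lives in the ops, one shared implementation can serve every action, presumably selected by the core when an ops leaves .walk/.lookup unset (that fallback is not shown in these hunks). A toy dispatcher sketching the shape, with invented names:

/* Illustrative model only: optional per-module callbacks with a generic
 * fallback chosen by the core when the callback is left NULL. */
#include <stdio.h>

struct toy_ops {
	const char *kind;
	/* Optional override; NULL means "use the shared implementation". */
	int (*lookup)(const struct toy_ops *ops, unsigned int index);
};

/* Shared implementation, analogous to tcf_idr_search() keyed by ops data. */
static int generic_lookup(const struct toy_ops *ops, unsigned int index)
{
	printf("generic lookup for %s, index %u\n", ops->kind, index);
	return 0;
}

/* Core dispatcher: prefer the module hook, fall back to the generic one. */
static int core_lookup(const struct toy_ops *ops, unsigned int index)
{
	if (ops->lookup)
		return ops->lookup(ops, index);
	return generic_lookup(ops, index);
}

static int special_lookup(const struct toy_ops *ops, unsigned int index)
{
	printf("custom lookup for %s, index %u\n", ops->kind, index);
	return 0;
}

int main(void)
{
	struct toy_ops plain = { .kind = "gact" };	/* no override */
	struct toy_ops fancy = { .kind = "special", .lookup = special_lookup };

	core_lookup(&plain, 1);		/* takes the generic path */
	core_lookup(&fancy, 2);		/* takes the module's own path */
	return 0;
}
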
void __net_exit sample_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, sample_net_id); + tc_action_net_exit(net_list, act_sample_ops.net_id); } static struct pernet_operations sample_net_ops = { .init = sample_init_net, .exit_batch = sample_exit_net, - .id = &sample_net_id, + .id = &act_sample_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 8c1d60bde93e..18d376135461 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -18,7 +18,6 @@ #include <linux/tc_act/tc_defact.h> #include <net/tc_act/tc_defact.h> -static unsigned int simp_net_id; static struct tc_action_ops act_simp_ops; #define SIMP_MAX_DATA 32 @@ -89,7 +88,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, simp_net_id); + struct tc_action_net *tn = net_generic(net, act_simp_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_DEF_MAX + 1]; struct tcf_chain *goto_ch = NULL; @@ -198,23 +197,6 @@ nla_put_failure: return -1; } -static int tcf_simp_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, simp_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, simp_net_id); - - return tcf_idr_search(tn, a, index); -} - static struct tc_action_ops act_simp_ops = { .kind = "simple", .id = TCA_ID_SIMP, @@ -223,27 +205,25 @@ static struct tc_action_ops act_simp_ops = { .dump = tcf_simp_dump, .cleanup = tcf_simp_release, .init = tcf_simp_init, - .walk = tcf_simp_walker, - .lookup = tcf_simp_search, .size = sizeof(struct tcf_defact), }; static __net_init int simp_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, simp_net_id); + struct tc_action_net *tn = net_generic(net, act_simp_ops.net_id); return tc_action_net_init(net, tn, &act_simp_ops); } static void __net_exit simp_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, simp_net_id); + tc_action_net_exit(net_list, act_simp_ops.net_id); } static struct pernet_operations simp_net_ops = { .init = simp_init_net, .exit_batch = simp_exit_net, - .id = &simp_net_id, + .id = &act_simp_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index e3bd11dfe1ca..7f598784fd30 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -20,7 +20,6 @@ #include <linux/tc_act/tc_skbedit.h> #include <net/tc_act/tc_skbedit.h> -static unsigned int skbedit_net_id; static struct tc_action_ops act_skbedit_ops; static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params, @@ -118,7 +117,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 act_flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, skbedit_net_id); + struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id); bool bind = act_flags & TCA_ACT_FLAGS_BIND; struct tcf_skbedit_params *params_new; struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; @@ -347,23 +346,6 @@ static void tcf_skbedit_cleanup(struct tc_action *a) kfree_rcu(params, rcu); } -static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb, - struct 
netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, skbedit_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, skbedit_net_id); - - return tcf_idr_search(tn, a, index); -} - static size_t tcf_skbedit_get_fill_size(const struct tc_action *act) { return nla_total_size(sizeof(struct tc_skbedit)) @@ -428,29 +410,27 @@ static struct tc_action_ops act_skbedit_ops = { .dump = tcf_skbedit_dump, .init = tcf_skbedit_init, .cleanup = tcf_skbedit_cleanup, - .walk = tcf_skbedit_walker, .get_fill_size = tcf_skbedit_get_fill_size, - .lookup = tcf_skbedit_search, .offload_act_setup = tcf_skbedit_offload_act_setup, .size = sizeof(struct tcf_skbedit), }; static __net_init int skbedit_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, skbedit_net_id); + struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id); return tc_action_net_init(net, tn, &act_skbedit_ops); } static void __net_exit skbedit_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, skbedit_net_id); + tc_action_net_exit(net_list, act_skbedit_ops.net_id); } static struct pernet_operations skbedit_net_ops = { .init = skbedit_init_net, .exit_batch = skbedit_exit_net, - .id = &skbedit_net_id, + .id = &act_skbedit_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 2083612d8780..d98758a63934 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -19,7 +19,6 @@ #include <linux/tc_act/tc_skbmod.h> #include <net/tc_act/tc_skbmod.h> -static unsigned int skbmod_net_id; static struct tc_action_ops act_skbmod_ops; static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, @@ -103,7 +102,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, skbmod_net_id); + struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); bool ovr = flags & TCA_ACT_FLAGS_REPLACE; bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_SKBMOD_MAX + 1]; @@ -276,23 +275,6 @@ nla_put_failure: return -1; } -static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, skbmod_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, skbmod_net_id); - - return tcf_idr_search(tn, a, index); -} - static struct tc_action_ops act_skbmod_ops = { .kind = "skbmod", .id = TCA_ACT_SKBMOD, @@ -301,27 +283,25 @@ static struct tc_action_ops act_skbmod_ops = { .dump = tcf_skbmod_dump, .init = tcf_skbmod_init, .cleanup = tcf_skbmod_cleanup, - .walk = tcf_skbmod_walker, - .lookup = tcf_skbmod_search, .size = sizeof(struct tcf_skbmod), }; static __net_init int skbmod_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, skbmod_net_id); + struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); return tc_action_net_init(net, tn, &act_skbmod_ops); } static void __net_exit skbmod_exit_net(struct list_head *net_list) { - 
tc_action_net_exit(net_list, skbmod_net_id); + tc_action_net_exit(net_list, act_skbmod_ops.net_id); } static struct pernet_operations skbmod_net_ops = { .init = skbmod_init_net, .exit_batch = skbmod_exit_net, - .id = &skbmod_net_id, + .id = &act_skbmod_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 856dc23cef8c..2691a3d8e451 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -20,7 +20,6 @@ #include <linux/tc_act/tc_tunnel_key.h> #include <net/tc_act/tc_tunnel_key.h> -static unsigned int tunnel_key_net_id; static struct tc_action_ops act_tunnel_key_ops; static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a, @@ -358,7 +357,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 act_flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); + struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id); bool bind = act_flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1]; struct tcf_tunnel_key_params *params_new; @@ -770,23 +769,6 @@ nla_put_failure: return -1; } -static int tunnel_key_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - -static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); - - return tcf_idr_search(tn, a, index); -} - static void tcf_tunnel_encap_put_tunnel(void *priv) { struct ip_tunnel_info *tunnel = priv; @@ -850,28 +832,26 @@ static struct tc_action_ops act_tunnel_key_ops = { .dump = tunnel_key_dump, .init = tunnel_key_init, .cleanup = tunnel_key_release, - .walk = tunnel_key_walker, - .lookup = tunnel_key_search, .offload_act_setup = tcf_tunnel_key_offload_act_setup, .size = sizeof(struct tcf_tunnel_key), }; static __net_init int tunnel_key_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); + struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id); return tc_action_net_init(net, tn, &act_tunnel_key_ops); } static void __net_exit tunnel_key_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, tunnel_key_net_id); + tc_action_net_exit(net_list, act_tunnel_key_ops.net_id); } static struct pernet_operations tunnel_key_net_ops = { .init = tunnel_key_init_net, .exit_batch = tunnel_key_exit_net, - .id = &tunnel_key_net_id, + .id = &act_tunnel_key_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 68b5e772386a..7b24e898a3e6 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -16,7 +16,6 @@ #include <linux/tc_act/tc_vlan.h> #include <net/tc_act/tc_vlan.h> -static unsigned int vlan_net_id; static struct tc_action_ops act_vlan_ops; static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a, @@ -117,7 +116,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { - struct tc_action_net *tn = net_generic(net, vlan_net_id); + struct tc_action_net *tn = net_generic(net, act_vlan_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_VLAN_MAX + 1]; struct tcf_chain 
*goto_ch = NULL; @@ -333,16 +332,6 @@ nla_put_failure: return -1; } -static int tcf_vlan_walker(struct net *net, struct sk_buff *skb, - struct netlink_callback *cb, int type, - const struct tc_action_ops *ops, - struct netlink_ext_ack *extack) -{ - struct tc_action_net *tn = net_generic(net, vlan_net_id); - - return tcf_generic_walker(tn, skb, cb, type, ops, extack); -} - static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { @@ -353,13 +342,6 @@ static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets, tm->lastuse = max_t(u64, tm->lastuse, lastuse); } -static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index) -{ - struct tc_action_net *tn = net_generic(net, vlan_net_id); - - return tcf_idr_search(tn, a, index); -} - static size_t tcf_vlan_get_fill_size(const struct tc_action *act) { return nla_total_size(sizeof(struct tc_vlan)) @@ -438,30 +420,28 @@ static struct tc_action_ops act_vlan_ops = { .dump = tcf_vlan_dump, .init = tcf_vlan_init, .cleanup = tcf_vlan_cleanup, - .walk = tcf_vlan_walker, .stats_update = tcf_vlan_stats_update, .get_fill_size = tcf_vlan_get_fill_size, - .lookup = tcf_vlan_search, .offload_act_setup = tcf_vlan_offload_act_setup, .size = sizeof(struct tcf_vlan), }; static __net_init int vlan_init_net(struct net *net) { - struct tc_action_net *tn = net_generic(net, vlan_net_id); + struct tc_action_net *tn = net_generic(net, act_vlan_ops.net_id); return tc_action_net_init(net, tn, &act_vlan_ops); } static void __net_exit vlan_exit_net(struct list_head *net_list) { - tc_action_net_exit(net_list, vlan_net_id); + tc_action_net_exit(net_list, act_vlan_ops.net_id); } static struct pernet_operations vlan_net_ops = { .init = vlan_init_net, .exit_batch = vlan_exit_net, - .id = &vlan_net_id, + .id = &act_vlan_ops.net_id, .size = sizeof(struct tc_action_net), }; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 790d6809be81..5d0d57dc5cb7 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1977,9 +1977,6 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, bool rtnl_held = false; u32 flags; - if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - replay: tp_created = 0; @@ -2208,9 +2205,6 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, int err; bool rtnl_held = false; - if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) @@ -2826,10 +2820,6 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, unsigned long cl; int err; - if (n->nlmsg_type != RTM_GETCHAIN && - !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - replay: q = NULL; err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, @@ -3639,9 +3629,6 @@ int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, if (err) return err; - if (!block_index) - return 0; - qe->info.binder_type = binder_type; qe->info.chain_head_change = tcf_chain_head_change_dflt; qe->info.chain_head_change_priv = &qe->filter_chain; diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 3f935cbbaff6..29adac7812fe 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -424,6 +424,11 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, return -EINVAL; } + if (!nhandle) { + NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid"); + return -EINVAL; + } + h1 = 
to_hash(nhandle); b = rtnl_dereference(head->table[h1]); if (!b) { @@ -477,8 +482,13 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, int err; bool new = true; + if (!handle) { + NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid"); + return -EINVAL; + } + if (opt == NULL) - return handle ? -EINVAL : 0; + return -EINVAL; err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt, route4_policy, NULL); @@ -486,7 +496,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, return err; fold = *arg; - if (fold && handle && fold->handle != handle) + if (fold && fold->handle != handle) return -EINVAL; err = -ENOBUFS; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index bf87b50837a8..db1569fac57c 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -171,7 +171,7 @@ out_einval: } EXPORT_SYMBOL(register_qdisc); -int unregister_qdisc(struct Qdisc_ops *qops) +void unregister_qdisc(struct Qdisc_ops *qops) { struct Qdisc_ops *q, **qp; int err = -ENOENT; @@ -186,7 +186,8 @@ int unregister_qdisc(struct Qdisc_ops *qops) err = 0; } write_unlock(&qdisc_mod_lock); - return err; + + WARN(err, "unregister qdisc(%s) failed\n", qops->id); } EXPORT_SYMBOL(unregister_qdisc); @@ -194,7 +195,7 @@ EXPORT_SYMBOL(unregister_qdisc); void qdisc_get_default(char *name, size_t len) { read_lock(&qdisc_mod_lock); - strlcpy(name, default_qdisc_ops->id, len); + strscpy(name, default_qdisc_ops->id, len); read_unlock(&qdisc_mod_lock); } @@ -1163,7 +1164,7 @@ static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca, static struct Qdisc *qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, - struct Qdisc *p, u32 parent, u32 handle, + u32 parent, u32 handle, struct nlattr **tca, int *errp, struct netlink_ext_ack *extack) { @@ -1424,10 +1425,6 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, struct Qdisc *p = NULL; int err; - if ((n->nlmsg_type != RTM_GETQDISC) && - !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) @@ -1508,9 +1505,6 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, struct Qdisc *q, *p; int err; - if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - replay: /* Reinit, just in case something touches this. 
*/ err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, @@ -1640,7 +1634,7 @@ create_n_graft: } if (clid == TC_H_INGRESS) { if (dev_ingress_queue(dev)) { - q = qdisc_create(dev, dev_ingress_queue(dev), p, + q = qdisc_create(dev, dev_ingress_queue(dev), tcm->tcm_parent, tcm->tcm_parent, tca, &err, extack); } else { @@ -1657,7 +1651,7 @@ create_n_graft: else dev_queue = netdev_get_tx_queue(dev, 0); - q = qdisc_create(dev, dev_queue, p, + q = qdisc_create(dev, dev_queue, tcm->tcm_parent, tcm->tcm_handle, tca, &err, extack); } @@ -1992,10 +1986,6 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, u32 qid; int err; - if ((n->nlmsg_type != RTM_GETTCLASS) && - !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 4c8e994cf0a5..816fd0d7ba38 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -577,7 +577,6 @@ static void atm_tc_reset(struct Qdisc *sch) pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p); list_for_each_entry(flow, &p->flows, list) qdisc_reset(flow->q); - sch->q.qlen = 0; } static void atm_tc_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index a43a58a73d09..36acc95d611e 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -2569,9 +2569,6 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt, struct nlattr *tb[TCA_CAKE_MAX + 1]; int err; - if (!opt) - return -EINVAL; - err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy, extack); if (err < 0) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 91a0dc463c48..ba99ce05cd52 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -975,7 +975,6 @@ cbq_reset(struct Qdisc *sch) cl->cpriority = cl->priority; } } - sch->q.qlen = 0; } diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 2adbd945bf15..3ac3e5c80b6f 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -60,7 +60,6 @@ struct choke_sched_data { u32 forced_drop; /* Forced drops, qavg > max_thresh */ u32 forced_mark; /* Forced marks, qavg > max_thresh */ u32 pdrop; /* Drops due to queue limits */ - u32 other; /* Drops due to drop() calls */ u32 matched; /* Drops to flow match */ } stats; @@ -315,8 +314,6 @@ static void choke_reset(struct Qdisc *sch) rtnl_qdisc_drop(skb, sch); } - sch->q.qlen = 0; - sch->qstats.backlog = 0; if (q->tab) memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); q->head = q->tail = 0; @@ -466,7 +463,6 @@ static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d) .early = q->stats.prob_drop + q->stats.forced_drop, .marked = q->stats.prob_mark + q->stats.forced_mark, .pdrop = q->stats.pdrop, - .other = q->stats.other, .matched = q->stats.matched, }; diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 30169b3adbbb..d7a4874543de 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -138,9 +138,6 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt, unsigned int qlen, dropped = 0; int err; - if (!opt) - return -EINVAL; - err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt, codel_policy, NULL); if (err < 0) diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 18e4f7a0b291..4e5b1cf11b85 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -441,8 +441,6 @@ static void drr_reset_qdisc(struct Qdisc *sch) qdisc_reset(cl->qdisc); } } - sch->qstats.backlog = 0; - 
sch->q.qlen = 0; } static void drr_destroy_qdisc(struct Qdisc *sch) diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 4c100d105269..7da6dc38a382 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -409,8 +409,6 @@ static void dsmark_reset(struct Qdisc *sch) pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); if (p->q) qdisc_reset(p->q); - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void dsmark_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index c48f91075b5c..61d1f0e32cf3 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -323,9 +323,6 @@ static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q, struct tc_etf_qopt_offload etf = { }; int err; - if (q->offload) - return 0; - if (!ops->ndo_setup_tc) { NL_SET_ERR_MSG(extack, "Specified device does not support ETF offload"); return -EOPNOTSUPP; @@ -445,9 +442,6 @@ static void etf_reset(struct Qdisc *sch) timesortedlist_clear(sch); __qdisc_reset_queue(&sch->q); - sch->qstats.backlog = 0; - sch->q.qlen = 0; - q->last = 0; } diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index d73393493553..a3aea22ef09d 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -594,11 +594,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, unsigned int i; int err; - if (!opt) { - NL_SET_ERR_MSG(extack, "ETS options are required for this operation"); - return -EINVAL; - } - err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_policy, extack); if (err < 0) return err; @@ -727,8 +722,6 @@ static void ets_qdisc_reset(struct Qdisc *sch) } for (band = 0; band < q->nbands; band++) qdisc_reset(q->classes[band].qdisc); - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void ets_qdisc_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 2fb76fc0cc31..48d14fb90ba0 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -808,9 +808,6 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt, unsigned drop_len = 0; u32 fq_log; - if (!opt) - return -EINVAL; - err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy, NULL); if (err < 0) diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 839e1235db05..eeea8c6d54e2 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -347,8 +347,6 @@ static void fq_codel_reset(struct Qdisc *sch) codel_vars_init(&flow->cvars); } memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); - sch->q.qlen = 0; - sch->qstats.backlog = 0; q->memory_usage = 0; } @@ -374,9 +372,6 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, u32 quantum = 0; int err; - if (!opt) - return -EINVAL; - err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy, NULL); if (err < 0) @@ -483,26 +478,24 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, if (opt) { err = fq_codel_change(sch, opt, extack); if (err) - goto init_failure; + return err; } err = tcf_block_get(&q->block, &q->filter_list, sch, extack); if (err) - goto init_failure; + return err; if (!q->flows) { q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_codel_flow), GFP_KERNEL); - if (!q->flows) { - err = -ENOMEM; - goto init_failure; - } + if (!q->flows) + return -ENOMEM; + q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL); - if (!q->backlogs) { - err = -ENOMEM; - goto alloc_failure; - } + if (!q->backlogs) + return -ENOMEM; + for (i = 0; i < q->flows_cnt; i++) { struct fq_codel_flow *flow = q->flows + i; @@ -515,13 +508,6 
@@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, else sch->flags &= ~TCQ_F_CAN_BYPASS; return 0; - -alloc_failure: - kvfree(q->flows); - q->flows = NULL; -init_failure: - q->flows_cnt = 0; - return err; } static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index d6aba6edd16e..6980796d435d 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -283,9 +283,6 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt, unsigned int num_dropped = 0; int err; - if (!opt) - return -EINVAL; - err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack); if (err < 0) return err; @@ -521,9 +518,6 @@ static void fq_pie_reset(struct Qdisc *sch) INIT_LIST_HEAD(&flow->flowchain); pie_vars_init(&flow->vars); } - - sch->q.qlen = 0; - sch->qstats.backlog = 0; } static void fq_pie_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index d47b9689eba6..a9aadc4e6858 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -409,7 +409,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets) void __qdisc_run(struct Qdisc *q) { - int quota = dev_tx_weight; + int quota = READ_ONCE(dev_tx_weight); int packets; while (qdisc_restart(q, &packets)) { @@ -941,7 +941,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, goto errout; __skb_queue_head_init(&sch->gso_skb); __skb_queue_head_init(&sch->skb_bad_txq); - qdisc_skb_head_init(&sch->q); gnet_stats_basic_sync_init(&sch->bstats); spin_lock_init(&sch->q.lock); @@ -1122,6 +1121,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, } EXPORT_SYMBOL(dev_graft_qdisc); +static void shutdown_scheduler_queue(struct net_device *dev, + struct netdev_queue *dev_queue, + void *_qdisc_default) +{ + struct Qdisc *qdisc = dev_queue->qdisc_sleeping; + struct Qdisc *qdisc_default = _qdisc_default; + + if (qdisc) { + rcu_assign_pointer(dev_queue->qdisc, qdisc_default); + dev_queue->qdisc_sleeping = qdisc_default; + + qdisc_put(qdisc); + } +} + static void attach_one_default_qdisc(struct net_device *dev, struct netdev_queue *dev_queue, void *_unused) @@ -1169,6 +1183,7 @@ static void attach_default_qdiscs(struct net_device *dev) if (qdisc == &noop_qdisc) { netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n", default_qdisc_ops->id, noqueue_qdisc_ops.id); + netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); dev->priv_flags |= IFF_NO_QUEUE; netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); qdisc = txq->qdisc_sleeping; @@ -1447,21 +1462,6 @@ void dev_init_scheduler(struct net_device *dev) timer_setup(&dev->watchdog_timer, dev_watchdog, 0); } -static void shutdown_scheduler_queue(struct net_device *dev, - struct netdev_queue *dev_queue, - void *_qdisc_default) -{ - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; - struct Qdisc *qdisc_default = _qdisc_default; - - if (qdisc) { - rcu_assign_pointer(dev_queue->qdisc, qdisc_default); - dev_queue->qdisc_sleeping = qdisc_default; - - qdisc_put(qdisc); - } -} - void dev_shutdown(struct net_device *dev) { netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 1073c76d05c4..a661b062cca8 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -648,9 +648,6 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt, u32 max_P; struct gred_sched_data *prealloc; - if (opt == NULL) - return -EINVAL; - err = 
nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy, extack); if (err < 0) @@ -829,7 +826,6 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) opt.Wlog = q->parms.Wlog; opt.Plog = q->parms.Plog; opt.Scell_log = q->parms.Scell_log; - opt.other = q->stats.other; opt.early = q->stats.prob_drop; opt.forced = q->stats.forced_drop; opt.pdrop = q->stats.pdrop; @@ -895,8 +891,6 @@ append_opt: goto nla_put_failure; if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop)) goto nla_put_failure; - if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other)) - goto nla_put_failure; nla_nest_end(skb, vq); } @@ -914,10 +908,9 @@ static void gred_destroy(struct Qdisc *sch) struct gred_sched *table = qdisc_priv(sch); int i; - for (i = 0; i < table->DPs; i++) { - if (table->tab[i]) - gred_destroy_vq(table->tab[i]); - } + for (i = 0; i < table->DPs; i++) + gred_destroy_vq(table->tab[i]); + gred_offload(sch, TC_GRED_DESTROY); kfree(table->opt); } diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index d3979a6000e7..c8bef923c79c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1430,7 +1430,7 @@ hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt, struct hfsc_sched *q = qdisc_priv(sch); struct tc_hfsc_qopt *qopt; - if (opt == NULL || nla_len(opt) < sizeof(*qopt)) + if (nla_len(opt) < sizeof(*qopt)) return -EINVAL; qopt = nla_data(opt); @@ -1484,8 +1484,6 @@ hfsc_reset_qdisc(struct Qdisc *sch) } q->eligible = RB_ROOT; qdisc_watchdog_cancel(&q->watchdog); - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 420ede875322..d26cd436cbe3 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -516,9 +516,6 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt, u32 new_quantum = q->quantum; u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight; - if (!opt) - return -EINVAL; - err = nla_parse_nested_deprecated(tb, TCA_HHF_MAX, opt, hhf_policy, NULL); if (err < 0) diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 23a9d6242429..78d0c7549c74 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1008,8 +1008,6 @@ static void htb_reset(struct Qdisc *sch) } qdisc_watchdog_cancel(&q->watchdog); __qdisc_reset_queue(&q->direct_queue); - sch->q.qlen = 0; - sch->qstats.backlog = 0; memset(q->hlevel, 0, sizeof(q->hlevel)); memset(q->row_mask, 0, sizeof(q->row_mask)); } @@ -1104,9 +1102,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt, err = qdisc_class_hash_init(&q->clhash); if (err < 0) - goto err_free_direct_qdiscs; - - qdisc_skb_head_init(&q->direct_queue); + return err; if (tb[TCA_HTB_DIRECT_QLEN]) q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); @@ -1127,8 +1123,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt, qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, TC_H_MAKE(sch->handle, 0), extack); if (!qdisc) { - err = -ENOMEM; - goto err_free_qdiscs; + return -ENOMEM; } htb_set_lockdep_class_child(qdisc); @@ -1146,7 +1141,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt, }; err = htb_offload(dev, &offload_opt); if (err) - goto err_free_qdiscs; + return err; /* Defer this assignment, so that htb_destroy skips offload-related * parts (especially calling ndo_setup_tc) on errors. 
@@ -1154,22 +1149,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt, q->offload = true; return 0; - -err_free_qdiscs: - for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx]; - ntx++) - qdisc_put(q->direct_qdiscs[ntx]); - - qdisc_class_hash_destroy(&q->clhash); - /* Prevent use-after-free and double-free when htb_destroy gets called. - */ - q->clhash.hash = NULL; - q->clhash.hashsize = 0; - -err_free_direct_qdiscs: - kfree(q->direct_qdiscs); - q->direct_qdiscs = NULL; - return err; } static void htb_attach_offload(struct Qdisc *sch) @@ -1692,13 +1671,12 @@ static void htb_destroy(struct Qdisc *sch) qdisc_class_hash_destroy(&q->clhash); __qdisc_reset_queue(&q->direct_queue); - if (!q->offload) - return; - - offload_opt = (struct tc_htb_qopt_offload) { - .command = TC_HTB_DESTROY, - }; - htb_offload(dev, &offload_opt); + if (q->offload) { + offload_opt = (struct tc_htb_qopt_offload) { + .command = TC_HTB_DESTROY, + }; + htb_offload(dev, &offload_opt); + } if (!q->direct_qdiscs) return; diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index cd8ab90c4765..f28050c7f12d 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -152,7 +152,6 @@ multiq_reset(struct Qdisc *sch) for (band = 0; band < q->bands; band++) qdisc_reset(q->queues[band]); - sch->q.qlen = 0; q->curband = 0; } diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 5449ed114e40..b70ac04110dd 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -961,9 +961,6 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, int old_loss_model = CLG_RANDOM; int ret; - if (opt == NULL) - return -EINVAL; - qopt = nla_data(opt); ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); if (ret < 0) diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 5a457ff61acd..974038ba6c7b 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -143,9 +143,6 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt, unsigned int qlen, dropped = 0; int err; - if (!opt) - return -EINVAL; - err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy, NULL); if (err < 0) diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index cbc2ebca4548..ea8c4a7174bb 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -161,9 +161,6 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt, struct plug_sched_data *q = qdisc_priv(sch); struct tc_plug_qopt *msg; - if (opt == NULL) - return -EINVAL; - msg = nla_data(opt); if (nla_len(opt) < sizeof(*msg)) return -EINVAL; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 3b8d7197c06b..298794c04836 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -135,8 +135,6 @@ prio_reset(struct Qdisc *sch) for (prio = 0; prio < q->bands; prio++) qdisc_reset(q->queues[prio]); - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt) @@ -187,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt, return -EINVAL; qopt = nla_data(opt); - if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) + if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < TCQ_MIN_PRIO_BANDS) return -EINVAL; for (i = 0; i <= TC_PRIO_MAX; i++) { diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index d4ce58c90f9f..13246a9dc5c1 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1458,8 +1458,6 @@ static void qfq_reset_qdisc(struct Qdisc *sch) qdisc_reset(cl->qdisc); } } - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static 
void qfq_destroy_qdisc(struct Qdisc *sch) diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 40adf1f07a82..4952406f70b9 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -176,8 +176,6 @@ static void red_reset(struct Qdisc *sch) struct red_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); - sch->qstats.backlog = 0; - sch->q.qlen = 0; red_restart(&q->vars); } @@ -370,9 +368,6 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, struct nlattr *tb[TCA_RED_MAX + 1]; int err; - if (!opt) - return -EINVAL; - err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy, extack); if (err < 0) @@ -463,7 +458,6 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) } st.early = q->stats.prob_drop + q->stats.forced_drop; st.pdrop = q->stats.pdrop; - st.other = q->stats.other; st.marked = q->stats.prob_mark + q->stats.forced_mark; return gnet_stats_copy_app(d, &st, sizeof(st)); diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 3d061a13d7ed..1be8d04d69dc 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) } } -static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) +static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q) { u32 sfbhash; - sfbhash = sfb_hash(skb, 0); + sfbhash = cb->hashes[0]; if (sfbhash) increment_one_qlen(sfbhash, 0, q); - sfbhash = sfb_hash(skb, 1); + sfbhash = cb->hashes[1]; if (sfbhash) increment_one_qlen(sfbhash, 1, q); } @@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, { struct sfb_sched_data *q = qdisc_priv(sch); + unsigned int len = qdisc_pkt_len(skb); struct Qdisc *child = q->qdisc; struct tcf_proto *fl; + struct sfb_skb_cb cb; int i; u32 p_min = ~0; u32 minqlen = ~0; @@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, } enqueue: + memcpy(&cb, sfb_skb_cb(skb), sizeof(cb)); ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; - increment_qlen(skb, q); + increment_qlen(&cb, q); } else if (net_xmit_drop_count(ret)) { q->stats.childdrop++; qdisc_qstats_drop(sch); @@ -453,8 +456,6 @@ static void sfb_reset(struct Qdisc *sch) struct sfb_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); - sch->qstats.backlog = 0; - sch->q.qlen = 0; q->slot = 0; q->double_buffering = false; sfb_zero_all_buckets(q); diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 7a5e4c454715..df72fb83d9c7 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -213,9 +213,6 @@ static void skbprio_reset(struct Qdisc *sch) struct skbprio_sched_data *q = qdisc_priv(sch); int prio; - sch->qstats.backlog = 0; - sch->q.qlen = 0; - for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) __skb_queue_purge(&q->qdiscs[prio]); diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 0b941dd63d26..db88a692ef81 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1636,8 +1636,6 @@ static void taprio_reset(struct Qdisc *sch) if (q->qdiscs[i]) qdisc_reset(q->qdiscs[i]); } - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void taprio_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 72102277449e..e031c1a41ea6 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -330,8 +330,6 @@ static void tbf_reset(struct Qdisc *sch) 
struct tbf_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); - sch->qstats.backlog = 0; - sch->q.qlen = 0; q->t_c = ktime_get_ns(); q->tokens = q->buffer; q->ptokens = q->mtu; @@ -356,6 +354,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, struct nlattr *tb[TCA_TBF_MAX + 1]; struct tc_tbf_qopt *qopt; struct Qdisc *child = NULL; + struct Qdisc *old = NULL; struct psched_ratecfg rate; struct psched_ratecfg peak; u64 max_size; @@ -447,7 +446,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, sch_tree_lock(sch); if (child) { qdisc_tree_flush_backlog(q->qdisc); - qdisc_put(q->qdisc); + old = q->qdisc; q->qdisc = child; } q->limit = qopt->limit; @@ -467,6 +466,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg)); sch_tree_unlock(sch); + qdisc_put(old); err = 0; tbf_offload_change(sch); diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 6af6b95bdb67..16f9238aa51d 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -124,7 +124,6 @@ teql_reset(struct Qdisc *sch) struct teql_sched_data *dat = qdisc_priv(sch); skb_queue_purge(&dat->q); - sch->q.qlen = 0; } static void @@ -492,7 +491,7 @@ static int __init teql_init(void) master = netdev_priv(dev); - strlcpy(master->qops.id, dev->name, IFNAMSIZ); + strscpy(master->qops.id, dev->name, IFNAMSIZ); err = register_qdisc(&master->qops); if (err) { diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 79c1318af1fe..0939cc3b915a 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1855,7 +1855,6 @@ static void smc_listen_out_connected(struct smc_sock *new_smc) { struct sock *newsmcsk = &new_smc->sk; - sk_refcnt_debug_inc(newsmcsk); if (newsmcsk->sk_state == SMC_INIT) newsmcsk->sk_state = SMC_ACTIVE; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index ff49a11f57b8..ebf56cdf17db 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -757,6 +757,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, lnk->lgr = lgr; smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */ lnk->link_idx = link_idx; + lnk->wr_rx_id_compl = 0; smc_ibdev_cnt_inc(lnk); smcr_copy_dev_info_to_link(lnk); atomic_set(&lnk->conn_cnt, 0); diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index fe8b524ad846..285f9bd8e232 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -115,8 +115,10 @@ struct smc_link { dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */ dma_addr_t wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/ u64 wr_rx_id; /* seq # of last recv WR */ + u64 wr_rx_id_compl; /* seq # of last completed WR */ u32 wr_rx_cnt; /* number of WR recv buffers */ unsigned long wr_rx_tstamp; /* jiffies when last buf rx */ + wait_queue_head_t wr_rx_empty_wait; /* wait for RQ empty */ struct ib_reg_wr wr_reg; /* WR register memory region */ wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */ diff --git a/net/smc/smc_netlink.c b/net/smc/smc_netlink.c index c5a62f6f52ba..621c46c70073 100644 --- a/net/smc/smc_netlink.c +++ b/net/smc/smc_netlink.c @@ -142,7 +142,8 @@ struct genl_family smc_gen_nl_family __ro_after_init = { .netnsok = true, .module = THIS_MODULE, .ops = smc_gen_nl_ops, - .n_ops = ARRAY_SIZE(smc_gen_nl_ops) + .n_ops = ARRAY_SIZE(smc_gen_nl_ops), + .resv_start_op = SMC_NETLINK_DISABLE_HS_LIMITATION + 1, }; int __init smc_nl_init(void) diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 4c3bf6db7038..25fb2fd186e2 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -715,7 +715,8 @@ static 
struct genl_family smc_pnet_nl_family __ro_after_init = { .netnsok = true, .module = THIS_MODULE, .ops = smc_pnet_ops, - .n_ops = ARRAY_SIZE(smc_pnet_ops) + .n_ops = ARRAY_SIZE(smc_pnet_ops), + .resv_start_op = SMC_PNETID_FLUSH + 1, }; bool smc_pnet_is_ndev_pnetid(struct net *net, u8 *pnetid) diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 26f8f240d9e8..b0678a417e09 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -454,6 +454,7 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num) for (i = 0; i < num; i++) { link = wc[i].qp->qp_context; + link->wr_rx_id_compl = wc[i].wr_id; if (wc[i].status == IB_WC_SUCCESS) { link->wr_rx_tstamp = jiffies; smc_wr_rx_demultiplex(&wc[i]); @@ -465,6 +466,8 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num) case IB_WC_RNR_RETRY_EXC_ERR: case IB_WC_WR_FLUSH_ERR: smcr_link_down_cond_sched(link); + if (link->wr_rx_id_compl == link->wr_rx_id) + wake_up(&link->wr_rx_empty_wait); break; default: smc_wr_rx_post(link); /* refill WR RX */ @@ -639,6 +642,7 @@ void smc_wr_free_link(struct smc_link *lnk) return; ibdev = lnk->smcibdev->ibdev; + smc_wr_drain_cq(lnk); smc_wr_wakeup_reg_wait(lnk); smc_wr_wakeup_tx_wait(lnk); @@ -889,6 +893,7 @@ int smc_wr_create_link(struct smc_link *lnk) atomic_set(&lnk->wr_tx_refcnt, 0); init_waitqueue_head(&lnk->wr_reg_wait); atomic_set(&lnk->wr_reg_refcnt, 0); + init_waitqueue_head(&lnk->wr_rx_empty_wait); return rc; dma_unmap: diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index a54e90a1110f..45e9b894d3f8 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -73,6 +73,11 @@ static inline void smc_wr_tx_link_put(struct smc_link *link) wake_up_all(&link->wr_tx_wait); } +static inline void smc_wr_drain_cq(struct smc_link *lnk) +{ + wait_event(lnk->wr_rx_empty_wait, lnk->wr_rx_id_compl == lnk->wr_rx_id); +} + static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk) { wake_up_all(&lnk->wr_tx_wait); diff --git a/net/socket.c b/net/socket.c index 9b27c5e4e5ba..7378375d3a5b 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1801,7 +1801,7 @@ int __sys_listen(int fd, int backlog) sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { - somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; + somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn); if ((unsigned int)backlog > somaxconn) backlog = somaxconn; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index b098e707ad41..7d268a291486 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1902,7 +1902,7 @@ call_encode(struct rpc_task *task) break; case -EKEYEXPIRED: if (!task->tk_cred_retry) { - rpc_exit(task, task->tk_status); + rpc_call_rpcerror(task, task->tk_status); } else { task->tk_action = call_refresh; task->tk_cred_retry--; diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c index 7330eb9a70cf..c65c90ad626a 100644 --- a/net/sunrpc/sysfs.c +++ b/net/sunrpc/sysfs.c @@ -291,8 +291,10 @@ static ssize_t rpc_sysfs_xprt_state_change(struct kobject *kobj, int offline = 0, online = 0, remove = 0; struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj); - if (!xprt) - return 0; + if (!xprt || !xps) { + count = 0; + goto out_put; + } if (!strncmp(buf, "offline", 7)) offline = 1; diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 2f4d23238a7e..9618e4429f0f 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -160,7 +160,7 @@ static void map_set(u64 *up_map, int i, unsigned int v) static int map_get(u64 up_map, int i) { - return (up_map & (1 << i)) >> i; + return (up_map & (1ULL 
<< i)) >> i; } static struct tipc_peer *peer_prev(struct tipc_peer *peer) diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index c447cb5f879e..e8fd257c0e68 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -294,6 +294,7 @@ struct genl_family tipc_genl_family __ro_after_init = { .module = THIS_MODULE, .ops = tipc_genl_v2_ops, .n_ops = ARRAY_SIZE(tipc_genl_v2_ops), + .resv_start_op = TIPC_NL_ADDR_LEGACY_GET + 1, }; int __init tipc_netlink_start(void) diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 0749df80454d..fc68733673ba 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -1357,6 +1357,7 @@ static struct genl_family tipc_genl_compat_family __ro_after_init = { .module = THIS_MODULE, .small_ops = tipc_genl_compat_ops, .n_small_ops = ARRAY_SIZE(tipc_genl_compat_ops), + .resv_start_op = TIPC_GENL_CMD + 1, }; int __init tipc_netlink_compat_start(void) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index f76119f62f1b..fe27241cd13f 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -2702,7 +2702,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) crypto_info->version != TLS_1_3_VERSION && !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC); - tls_strp_init(&sw_ctx_rx->strp, sk); + rc = tls_strp_init(&sw_ctx_rx->strp, sk); + if (rc) + goto free_aead; } goto out; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index bf338b782fc4..dea2972c8178 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -785,15 +785,45 @@ static int unix_set_peek_off(struct sock *sk, int val) } #ifdef CONFIG_PROC_FS +static int unix_count_nr_fds(struct sock *sk) +{ + struct sk_buff *skb; + struct unix_sock *u; + int nr_fds = 0; + + spin_lock(&sk->sk_receive_queue.lock); + skb = skb_peek(&sk->sk_receive_queue); + while (skb) { + u = unix_sk(skb->sk); + nr_fds += atomic_read(&u->scm_stat.nr_fds); + skb = skb_peek_next(skb, &sk->sk_receive_queue); + } + spin_unlock(&sk->sk_receive_queue.lock); + + return nr_fds; +} + static void unix_show_fdinfo(struct seq_file *m, struct socket *sock) { struct sock *sk = sock->sk; struct unix_sock *u; + int nr_fds; if (sk) { - u = unix_sk(sock->sk); - seq_printf(m, "scm_fds: %u\n", - atomic_read(&u->scm_stat.nr_fds)); + u = unix_sk(sk); + if (sock->type == SOCK_DGRAM) { + nr_fds = atomic_read(&u->scm_stat.nr_fds); + goto out_print; + } + + unix_state_lock(sk); + if (sk->sk_state != TCP_LISTEN) + nr_fds = atomic_read(&u->scm_stat.nr_fds); + else + nr_fds = unix_count_nr_fds(sk); + unix_state_unlock(sk); +out_print: + seq_printf(m, "scm_fds: %u\n", nr_fds); } } #else diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index b4ee163154a6..ee418701cdee 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -882,6 +882,16 @@ s64 vsock_stream_has_space(struct vsock_sock *vsk) } EXPORT_SYMBOL_GPL(vsock_stream_has_space); +void vsock_data_ready(struct sock *sk) +{ + struct vsock_sock *vsk = vsock_sk(sk); + + if (vsock_stream_has_data(vsk) >= sk->sk_rcvlowat || + sock_flag(sk, SOCK_DONE)) + sk->sk_data_ready(sk); +} +EXPORT_SYMBOL_GPL(vsock_data_ready); + static int vsock_release(struct socket *sock) { __vsock_release(sock->sk, 0); @@ -1066,8 +1076,9 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock, if (transport && transport->stream_is_active(vsk) && !(sk->sk_shutdown & RCV_SHUTDOWN)) { bool data_ready_now = false; + int target = sock_rcvlowat(sk, 0, INT_MAX); int ret = transport->notify_poll_in( - vsk, 1, &data_ready_now); + vsk, target, 
&data_ready_now); if (ret < 0) { mask |= EPOLLERR; } else { @@ -2137,6 +2148,25 @@ out: return err; } +static int vsock_set_rcvlowat(struct sock *sk, int val) +{ + const struct vsock_transport *transport; + struct vsock_sock *vsk; + + vsk = vsock_sk(sk); + + if (val > vsk->buffer_size) + return -EINVAL; + + transport = vsk->transport; + + if (transport && transport->set_rcvlowat) + return transport->set_rcvlowat(vsk, val); + + WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); + return 0; +} + static const struct proto_ops vsock_stream_ops = { .family = PF_VSOCK, .owner = THIS_MODULE, @@ -2156,6 +2186,7 @@ static const struct proto_ops vsock_stream_ops = { .recvmsg = vsock_connectible_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, + .set_rcvlowat = vsock_set_rcvlowat, }; static const struct proto_ops vsock_seqpacket_ops = { diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index fd98229e3db3..59c3e2697069 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c @@ -815,6 +815,12 @@ int hvs_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written, return 0; } +static +int hvs_set_rcvlowat(struct vsock_sock *vsk, int val) +{ + return -EOPNOTSUPP; +} + static struct vsock_transport hvs_transport = { .module = THIS_MODULE, @@ -850,6 +856,7 @@ static struct vsock_transport hvs_transport = { .notify_send_pre_enqueue = hvs_notify_send_pre_enqueue, .notify_send_post_enqueue = hvs_notify_send_post_enqueue, + .set_rcvlowat = hvs_set_rcvlowat }; static bool hvs_check_transport(struct vsock_sock *vsk) diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index ec2c2afbf0d0..35863132f4f1 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -634,10 +634,7 @@ virtio_transport_notify_poll_in(struct vsock_sock *vsk, size_t target, bool *data_ready_now) { - if (vsock_stream_has_data(vsk)) - *data_ready_now = true; - else - *data_ready_now = false; + *data_ready_now = vsock_stream_has_data(vsk) >= target; return 0; } @@ -1084,7 +1081,7 @@ virtio_transport_recv_connected(struct sock *sk, switch (le16_to_cpu(pkt->hdr.op)) { case VIRTIO_VSOCK_OP_RW: virtio_transport_recv_enqueue(vsk, pkt); - sk->sk_data_ready(sk); + vsock_data_ready(sk); return err; case VIRTIO_VSOCK_OP_CREDIT_REQUEST: virtio_transport_send_credit_update(vsk); diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index b14f0ed7427b..842c94286d31 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -951,7 +951,7 @@ static int vmci_transport_recv_listen(struct sock *sk, * for ourself or any previous connection requests that we received. * If it's the latter, we try to find a socket in our list of pending * connections and, if we do, call the appropriate handler for the - * state that that socket is in. Otherwise we try to service the + * state that socket is in. Otherwise we try to service the * connection request. 
*/ pending = vmci_transport_get_pending(sk, pkt); diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c index d69fc4b595ad..7c3a7db134b2 100644 --- a/net/vmw_vsock/vmci_transport_notify.c +++ b/net/vmw_vsock/vmci_transport_notify.c @@ -307,7 +307,7 @@ vmci_transport_handle_wrote(struct sock *sk, struct vsock_sock *vsk = vsock_sk(sk); PKT_FIELD(vsk, sent_waiting_read) = false; #endif - sk->sk_data_ready(sk); + vsock_data_ready(sk); } static void vmci_transport_notify_pkt_socket_init(struct sock *sk) @@ -340,12 +340,12 @@ vmci_transport_notify_pkt_poll_in(struct sock *sk, { struct vsock_sock *vsk = vsock_sk(sk); - if (vsock_stream_has_data(vsk)) { + if (vsock_stream_has_data(vsk) >= target) { *data_ready_now = true; } else { - /* We can't read right now because there is nothing in the - * queue. Ask for notifications when there is something to - * read. + /* We can't read right now because there is not enough data + * in the queue. Ask for notifications when there is something + * to read. */ if (sk->sk_state == TCP_ESTABLISHED) { if (!send_waiting_read(sk, 1)) diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c index 0f36d7c45db3..e96a88d850a8 100644 --- a/net/vmw_vsock/vmci_transport_notify_qstate.c +++ b/net/vmw_vsock/vmci_transport_notify_qstate.c @@ -84,7 +84,7 @@ vmci_transport_handle_wrote(struct sock *sk, bool bottom_half, struct sockaddr_vm *dst, struct sockaddr_vm *src) { - sk->sk_data_ready(sk); + vsock_data_ready(sk); } static void vsock_block_update_write_window(struct sock *sk) @@ -161,12 +161,12 @@ vmci_transport_notify_pkt_poll_in(struct sock *sk, { struct vsock_sock *vsk = vsock_sk(sk); - if (vsock_stream_has_data(vsk)) { + if (vsock_stream_has_data(vsk) >= target) { *data_ready_now = true; } else { - /* We can't read right now because there is nothing in the - * queue. Ask for notifications when there is something to - * read. + /* We can't read right now because there is not enough data + * in the queue. Ask for notifications when there is something + * to read. */ if (sk->sk_state == TCP_ESTABLISHED) vsock_block_update_write_window(sk); @@ -282,7 +282,7 @@ vmci_transport_notify_pkt_recv_post_dequeue( /* See the comment in * vmci_transport_notify_pkt_send_post_enqueue(). */ - sk->sk_data_ready(sk); + vsock_data_ready(sk); } return err; diff --git a/net/wireless/core.c b/net/wireless/core.c index eefd6d8ff465..5b0c4d5b80cf 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -860,6 +860,9 @@ int wiphy_register(struct wiphy *wiphy) for (i = 0; i < sband->n_iftype_data; i++) { const struct ieee80211_sband_iftype_data *iftd; + bool has_ap, has_non_ap; + u32 ap_bits = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO); iftd = &sband->iftype_data[i]; @@ -879,6 +882,19 @@ int wiphy_register(struct wiphy *wiphy) else have_he = have_he && iftd->he_cap.has_he; + + has_ap = iftd->types_mask & ap_bits; + has_non_ap = iftd->types_mask & ~ap_bits; + + /* + * For EHT 20 MHz STA, the capabilities format differs + * but to simplify, don't check 20 MHz but rather check + * only if AP and non-AP were mentioned at the same time, + * reject if so. 
+ */ + if (WARN_ON(iftd->eht_cap.has_eht && + has_ap && has_non_ap)) + return -EINVAL; } if (WARN_ON(!have_he && band == NL80211_BAND_6GHZ)) diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index aab43469a2f0..0878b162890a 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -65,9 +65,10 @@ static ssize_t ht40allow_map_read(struct file *file, { struct wiphy *wiphy = file->private_data; char *buf; - unsigned int offset = 0, buf_size = PAGE_SIZE, i, r; + unsigned int offset = 0, buf_size = PAGE_SIZE, i; enum nl80211_band band; struct ieee80211_supported_band *sband; + ssize_t r; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 4935f94d1acc..edd062f104f4 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -171,7 +171,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) */ if (rdev->ops->del_key) for (i = 0; i < 6; i++) - rdev_del_key(rdev, dev, i, false, NULL); + rdev_del_key(rdev, dev, -1, i, false, NULL); if (wdev->u.ibss.current_bss) { cfg80211_unhold_bss(wdev->u.ibss.current_bss); diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c index 6a5f08f7491e..cca5e1cf089e 100644 --- a/net/wireless/lib80211_crypt_ccmp.c +++ b/net/wireless/lib80211_crypt_ccmp.c @@ -136,7 +136,7 @@ static int ccmp_init_iv_and_aad(const struct ieee80211_hdr *hdr, pos = (u8 *) hdr; aad[0] = pos[0] & 0x8f; aad[1] = pos[1] & 0xc7; - memcpy(aad + 2, hdr->addr1, 3 * ETH_ALEN); + memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN); pos = (u8 *) & hdr->seq_ctrl; aad[20] = pos[0] & 0x0f; aad[21] = 0; /* all bits masked */ diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 2705e3ee8fc4..8ff8b1c040f0 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1545,7 +1545,6 @@ static int nl80211_key_allowed(struct wireless_dev *wdev) return -ENOLINK; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: - /* for MLO, require driver validation of the link ID */ if (wdev->connected) return 0; return -ENOLINK; @@ -1821,10 +1820,15 @@ nl80211_send_iftype_data(struct sk_buff *msg, if (eht_cap->has_eht && he_cap->has_he) { u8 mcs_nss_size, ppe_thresh_size; u16 ppe_thres_hdr; + bool is_ap; + + is_ap = iftdata->types_mask & BIT(NL80211_IFTYPE_AP) || + iftdata->types_mask & BIT(NL80211_IFTYPE_P2P_GO); mcs_nss_size = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, - &eht_cap->eht_cap_elem); + &eht_cap->eht_cap_elem, + is_ap); ppe_thres_hdr = get_unaligned_le16(&eht_cap->eht_ppe_thres[0]); ppe_thresh_size = @@ -3476,8 +3480,21 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) if (result) goto out; - result = rdev_set_txq_params(rdev, netdev, - &txq_params); + txq_params.link_id = + nl80211_link_id_or_invalid(info->attrs); + + wdev_lock(netdev->ieee80211_ptr); + if (txq_params.link_id >= 0 && + !(netdev->ieee80211_ptr->valid_links & + BIT(txq_params.link_id))) + result = -ENOLINK; + else if (txq_params.link_id >= 0 && + !netdev->ieee80211_ptr->valid_links) + result = -EINVAL; + else + result = rdev_set_txq_params(rdev, netdev, + &txq_params); + wdev_unlock(netdev->ieee80211_ptr); if (result) goto out; } @@ -3848,12 +3865,19 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag for_each_valid_link(wdev, link_id) { struct nlattr *link = nla_nest_start(msg, link_id + 1); + struct cfg80211_chan_def chandef = {}; + int ret; if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)) goto nla_put_failure; if 
(nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev->links[link_id].addr)) goto nla_put_failure; + + ret = rdev_get_channel(rdev, wdev, link_id, &chandef); + if (ret == 0 && nl80211_send_chandef(msg, &chandef)) + goto nla_put_failure; + nla_nest_end(msg, link); } @@ -4320,6 +4344,38 @@ static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) return rdev_set_noack_map(rdev, dev, noack_map); } +static int nl80211_validate_key_link_id(struct genl_info *info, + struct wireless_dev *wdev, + int link_id, bool pairwise) +{ + if (pairwise) { + if (link_id != -1) { + GENL_SET_ERR_MSG(info, + "link ID not allowed for pairwise key"); + return -EINVAL; + } + + return 0; + } + + if (wdev->valid_links) { + if (link_id == -1) { + GENL_SET_ERR_MSG(info, + "link ID must for MLO group key"); + return -EINVAL; + } + if (!(wdev->valid_links & BIT(link_id))) { + GENL_SET_ERR_MSG(info, "invalid link ID for MLO group key"); + return -EINVAL; + } + } else if (link_id != -1) { + GENL_SET_ERR_MSG(info, "link ID not allowed for non-MLO group key"); + return -EINVAL; + } + + return 0; +} + struct get_key_cookie { struct sk_buff *msg; int error; @@ -4381,13 +4437,15 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) void *hdr; struct sk_buff *msg; bool bigtk_support = false; + int link_id = nl80211_link_id_or_invalid(info->attrs); + struct wireless_dev *wdev = dev->ieee80211_ptr; if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION)) bigtk_support = true; - if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION || - dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) && + if ((wdev->iftype == NL80211_IFTYPE_STATION || + wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT)) bigtk_support = true; @@ -4439,8 +4497,12 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) goto nla_put_failure; - err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie, - get_key_callback); + err = nl80211_validate_key_link_id(info, wdev, link_id, pairwise); + if (err) + goto free_msg; + + err = rdev_get_key(rdev, dev, link_id, key_idx, pairwise, mac_addr, + &cookie, get_key_callback); if (err) goto free_msg; @@ -4464,6 +4526,8 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) struct key_parse key; int err; struct net_device *dev = info->user_ptr[1]; + int link_id = nl80211_link_id_or_invalid(info->attrs); + struct wireless_dev *wdev = dev->ieee80211_ptr; err = nl80211_parse_key(info, &key); if (err) @@ -4479,7 +4543,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) !(key.p.mode == NL80211_KEY_SET_TX)) return -EINVAL; - wdev_lock(dev->ieee80211_ptr); + wdev_lock(wdev); if (key.def) { if (!rdev->ops->set_default_key) { @@ -4487,18 +4551,22 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) goto out; } - err = nl80211_key_allowed(dev->ieee80211_ptr); + err = nl80211_key_allowed(wdev); if (err) goto out; - err = rdev_set_default_key(rdev, dev, key.idx, - key.def_uni, key.def_multi); + err = nl80211_validate_key_link_id(info, wdev, link_id, false); + if (err) + goto out; + + err = rdev_set_default_key(rdev, dev, link_id, key.idx, + key.def_uni, key.def_multi); if (err) goto out; #ifdef CONFIG_CFG80211_WEXT - dev->ieee80211_ptr->wext.default_key = key.idx; + wdev->wext.default_key = key.idx; #endif } else if (key.defmgmt) { if (key.def_uni 
|| !key.def_multi) { @@ -4511,16 +4579,20 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) goto out; } - err = nl80211_key_allowed(dev->ieee80211_ptr); + err = nl80211_key_allowed(wdev); if (err) goto out; - err = rdev_set_default_mgmt_key(rdev, dev, key.idx); + err = nl80211_validate_key_link_id(info, wdev, link_id, false); + if (err) + goto out; + + err = rdev_set_default_mgmt_key(rdev, dev, link_id, key.idx); if (err) goto out; #ifdef CONFIG_CFG80211_WEXT - dev->ieee80211_ptr->wext.default_mgmt_key = key.idx; + wdev->wext.default_mgmt_key = key.idx; #endif } else if (key.defbeacon) { if (key.def_uni || !key.def_multi) { @@ -4533,11 +4605,15 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) goto out; } - err = nl80211_key_allowed(dev->ieee80211_ptr); + err = nl80211_key_allowed(wdev); + if (err) + goto out; + + err = nl80211_validate_key_link_id(info, wdev, link_id, false); if (err) goto out; - err = rdev_set_default_beacon_key(rdev, dev, key.idx); + err = rdev_set_default_beacon_key(rdev, dev, link_id, key.idx); if (err) goto out; } else if (key.p.mode == NL80211_KEY_SET_TX && @@ -4553,14 +4629,18 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) goto out; } - err = rdev_add_key(rdev, dev, key.idx, + err = nl80211_validate_key_link_id(info, wdev, link_id, true); + if (err) + goto out; + + err = rdev_add_key(rdev, dev, link_id, key.idx, NL80211_KEYTYPE_PAIRWISE, mac_addr, &key.p); } else { err = -EINVAL; } out: - wdev_unlock(dev->ieee80211_ptr); + wdev_unlock(wdev); return err; } @@ -4572,6 +4652,8 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) struct net_device *dev = info->user_ptr[1]; struct key_parse key; const u8 *mac_addr = NULL; + int link_id = nl80211_link_id_or_invalid(info->attrs); + struct wireless_dev *wdev = dev->ieee80211_ptr; err = nl80211_parse_key(info, &key); if (err) @@ -4613,18 +4695,23 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } - wdev_lock(dev->ieee80211_ptr); - err = nl80211_key_allowed(dev->ieee80211_ptr); + wdev_lock(wdev); + err = nl80211_key_allowed(wdev); if (err) GENL_SET_ERR_MSG(info, "key not allowed"); + + if (!err) + err = nl80211_validate_key_link_id(info, wdev, link_id, + key.type == NL80211_KEYTYPE_PAIRWISE); + if (!err) { - err = rdev_add_key(rdev, dev, key.idx, + err = rdev_add_key(rdev, dev, link_id, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, mac_addr, &key.p); if (err) GENL_SET_ERR_MSG(info, "key addition failed"); } - wdev_unlock(dev->ieee80211_ptr); + wdev_unlock(wdev); return err; } @@ -4636,6 +4723,8 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) struct net_device *dev = info->user_ptr[1]; u8 *mac_addr = NULL; struct key_parse key; + int link_id = nl80211_link_id_or_invalid(info->attrs); + struct wireless_dev *wdev = dev->ieee80211_ptr; err = nl80211_parse_key(info, &key); if (err) @@ -4663,27 +4752,31 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->del_key) return -EOPNOTSUPP; - wdev_lock(dev->ieee80211_ptr); - err = nl80211_key_allowed(dev->ieee80211_ptr); + wdev_lock(wdev); + err = nl80211_key_allowed(wdev); if (key.type == NL80211_KEYTYPE_GROUP && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) err = -ENOENT; if (!err) - err = rdev_del_key(rdev, dev, key.idx, + err = nl80211_validate_key_link_id(info, wdev, link_id, + key.type == NL80211_KEYTYPE_PAIRWISE); + + if (!err) + err = rdev_del_key(rdev, dev, 
link_id, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, mac_addr); #ifdef CONFIG_CFG80211_WEXT if (!err) { - if (key.idx == dev->ieee80211_ptr->wext.default_key) - dev->ieee80211_ptr->wext.default_key = -1; - else if (key.idx == dev->ieee80211_ptr->wext.default_mgmt_key) - dev->ieee80211_ptr->wext.default_mgmt_key = -1; + if (key.idx == wdev->wext.default_key) + wdev->wext.default_key = -1; + else if (key.idx == wdev->wext.default_mgmt_key) + wdev->wext.default_mgmt_key = -1; } #endif - wdev_unlock(dev->ieee80211_ptr); + wdev_unlock(wdev); return err; } @@ -5587,7 +5680,7 @@ static int nl80211_calculate_ap_params(struct cfg80211_ap_settings *params) params->eht_cap = (void *)(cap->data + 1); if (!ieee80211_eht_capa_size_ok((const u8 *)params->he_cap, (const u8 *)params->eht_cap, - cap->datalen - 1)) + cap->datalen - 1, true)) return -EINVAL; } cap = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION, ies, ies_len); @@ -6816,7 +6909,8 @@ static int nl80211_set_station_tdls(struct genl_info *info, if (!ieee80211_eht_capa_size_ok((const u8 *)params->link_sta_params.he_capa, (const u8 *)params->link_sta_params.eht_capa, - params->link_sta_params.eht_capa_len)) + params->link_sta_params.eht_capa_len, + false)) return -EINVAL; } } @@ -7127,7 +7221,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (!ieee80211_eht_capa_size_ok((const u8 *)params.link_sta_params.he_capa, (const u8 *)params.link_sta_params.eht_capa, - params.link_sta_params.eht_capa_len)) + params.link_sta_params.eht_capa_len, + false)) return -EINVAL; } } @@ -10087,8 +10182,10 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, (nla_put_u32(msg, NL80211_BSS_STATUS, NL80211_BSS_STATUS_ASSOCIATED) || (wdev->valid_links && - nla_put_u8(msg, NL80211_BSS_MLO_LINK_ID, - link_id)))) + (nla_put_u8(msg, NL80211_BSS_MLO_LINK_ID, + link_id) || + nla_put(msg, NL80211_BSS_MLD_ADDR, ETH_ALEN, + wdev->u.client.connected_addr))))) goto nla_put_failure; } break; @@ -11184,7 +11281,6 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info) struct net_device *dev = info->user_ptr[1]; int mcast_rate[NUM_NL80211_BANDS]; u32 nla_rate; - int err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && @@ -11203,9 +11299,7 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info) if (!nl80211_parse_mcast_rate(rdev, mcast_rate, nla_rate)) return -EINVAL; - err = rdev_set_mcast_rate(rdev, dev, mcast_rate); - - return err; + return rdev_set_mcast_rate(rdev, dev, mcast_rate); } static struct sk_buff * @@ -15887,7 +15981,8 @@ nl80211_add_mod_link_station(struct sk_buff *skb, struct genl_info *info, if (!ieee80211_eht_capa_size_ok((const u8 *)params.he_capa, (const u8 *)params.eht_capa, - params.eht_capa_len)) + params.eht_capa_len, + false)) return -EINVAL; } } @@ -17141,6 +17236,7 @@ static struct genl_family nl80211_fam __ro_after_init = { .n_ops = ARRAY_SIZE(nl80211_ops), .small_ops = nl80211_small_ops, .n_small_ops = ARRAY_SIZE(nl80211_small_ops), + .resv_start_op = NL80211_CMD_REMOVE_LINK_STA + 1, .mcgrps = nl80211_mcgrps, .n_mcgrps = ARRAY_SIZE(nl80211_mcgrps), .parallel_ops = true, @@ -18836,11 +18932,13 @@ EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, + unsigned int link_id, struct cfg80211_chan_def *chandef, gfp_t gfp, enum nl80211_commands notif, u8 count, bool quiet) { + 
struct wireless_dev *wdev = netdev->ieee80211_ptr; struct sk_buff *msg; void *hdr; @@ -18857,6 +18955,10 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) goto nla_put_failure; + if (wdev->valid_links && + nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)) + goto nla_put_failure; + if (nl80211_send_chandef(msg, chandef)) goto nla_put_failure; @@ -18916,22 +19018,26 @@ void cfg80211_ch_switch_notify(struct net_device *dev, cfg80211_sched_dfs_chan_update(rdev); - nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL, + nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL, NL80211_CMD_CH_SWITCH_NOTIFY, 0, false); } EXPORT_SYMBOL(cfg80211_ch_switch_notify); void cfg80211_ch_switch_started_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, - u8 count, bool quiet) + unsigned int link_id, u8 count, + bool quiet) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - trace_cfg80211_ch_switch_started_notify(dev, chandef); + ASSERT_WDEV_LOCK(wdev); + WARN_INVALID_LINK_ID(wdev, link_id); + + trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id); - nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL, + nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL, NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, count, quiet); } diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 40915a82da73..13b209a8db28 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -77,65 +77,69 @@ rdev_change_virtual_intf(struct cfg80211_registered_device *rdev, } static inline int rdev_add_key(struct cfg80211_registered_device *rdev, - struct net_device *netdev, u8 key_index, - bool pairwise, const u8 *mac_addr, + struct net_device *netdev, int link_id, + u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { int ret; - trace_rdev_add_key(&rdev->wiphy, netdev, key_index, pairwise, + trace_rdev_add_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr, params->mode); - ret = rdev->ops->add_key(&rdev->wiphy, netdev, key_index, pairwise, - mac_addr, params); + ret = rdev->ops->add_key(&rdev->wiphy, netdev, link_id, key_index, + pairwise, mac_addr, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, - u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, + int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, + void *cookie, void (*callback)(void *cookie, struct key_params*)) { int ret; - trace_rdev_get_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr); - ret = rdev->ops->get_key(&rdev->wiphy, netdev, key_index, pairwise, - mac_addr, cookie, callback); + trace_rdev_get_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, + mac_addr); + ret = rdev->ops->get_key(&rdev->wiphy, netdev, link_id, key_index, + pairwise, mac_addr, cookie, callback); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_key(struct cfg80211_registered_device *rdev, - struct net_device *netdev, u8 key_index, - bool pairwise, const u8 *mac_addr) + struct net_device *netdev, int link_id, + u8 key_index, bool pairwise, const u8 *mac_addr) { int ret; - trace_rdev_del_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr); - ret = rdev->ops->del_key(&rdev->wiphy, netdev, key_index, pairwise, - mac_addr); + 
trace_rdev_del_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, + mac_addr); + ret = rdev->ops->del_key(&rdev->wiphy, netdev, link_id, key_index, + pairwise, mac_addr); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_key(struct cfg80211_registered_device *rdev, - struct net_device *netdev, u8 key_index, bool unicast, - bool multicast) + struct net_device *netdev, int link_id, u8 key_index, + bool unicast, bool multicast) { int ret; - trace_rdev_set_default_key(&rdev->wiphy, netdev, key_index, + trace_rdev_set_default_key(&rdev->wiphy, netdev, link_id, key_index, unicast, multicast); - ret = rdev->ops->set_default_key(&rdev->wiphy, netdev, key_index, - unicast, multicast); + ret = rdev->ops->set_default_key(&rdev->wiphy, netdev, link_id, + key_index, unicast, multicast); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev, - struct net_device *netdev, u8 key_index) + struct net_device *netdev, int link_id, u8 key_index) { int ret; - trace_rdev_set_default_mgmt_key(&rdev->wiphy, netdev, key_index); - ret = rdev->ops->set_default_mgmt_key(&rdev->wiphy, netdev, + trace_rdev_set_default_mgmt_key(&rdev->wiphy, netdev, link_id, + key_index); + ret = rdev->ops->set_default_mgmt_key(&rdev->wiphy, netdev, link_id, key_index); trace_rdev_return_int(&rdev->wiphy, ret); return ret; @@ -143,13 +147,15 @@ rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev, static inline int rdev_set_default_beacon_key(struct cfg80211_registered_device *rdev, - struct net_device *netdev, u8 key_index) + struct net_device *netdev, int link_id, + u8 key_index) { int ret; - trace_rdev_set_default_beacon_key(&rdev->wiphy, netdev, key_index); - ret = rdev->ops->set_default_beacon_key(&rdev->wiphy, netdev, - key_index); + trace_rdev_set_default_beacon_key(&rdev->wiphy, netdev, link_id, + key_index); + ret = rdev->ops->set_default_beacon_key(&rdev->wiphy, netdev, link_id, + key_index); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } diff --git a/net/wireless/reg.c b/net/wireless/reg.c index c7383ede794f..d5c7a5aa6853 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2389,6 +2389,10 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: + if (!wdev->links[link].ap.beacon_interval) + continue; + chandef = wdev->links[link].ap.chandef; + break; case NL80211_IFTYPE_MESH_POINT: if (!wdev->u.mesh.beacon_interval) continue; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 0134e5d5c81a..5382fc2003db 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -540,7 +540,7 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry, memcpy(entry->bssid, pos, ETH_ALEN); pos += ETH_ALEN; - if (length == IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM) { + if (length >= IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM) { memcpy(&entry->short_ssid, pos, sizeof(entry->short_ssid)); entry->short_ssid_valid = true; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 27fb2a0c4052..d513536617bd 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -747,6 +747,9 @@ void __cfg80211_connect_result(struct net_device *dev, if (WARN_ON(!cr->links[link].addr)) goto out; } + + if (WARN_ON(wdev->connect_keys)) + goto out; } wdev->unprot_beacon_reported = 0; @@ -1325,7 +1328,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, 
NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT)) max_key_idx = 7; for (i = 0; i <= max_key_idx; i++) - rdev_del_key(rdev, dev, i, false, NULL); + rdev_del_key(rdev, dev, -1, i, false, NULL); } rdev_set_qos_map(rdev, dev, NULL); diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 10b2fd9bacb5..a405c3edbc47 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -434,13 +434,14 @@ TRACE_EVENT(rdev_change_virtual_intf, ); DECLARE_EVENT_CLASS(key_handle, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, - bool pairwise, const u8 *mac_addr), - TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr), + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, + u8 key_index, bool pairwise, const u8 *mac_addr), + TP_ARGS(wiphy, netdev, link_id, key_index, pairwise, mac_addr), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(mac_addr) + __field(int, link_id) __field(u8, key_index) __field(bool, pairwise) ), @@ -448,34 +449,38 @@ DECLARE_EVENT_CLASS(key_handle, WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(mac_addr, mac_addr); + __entry->link_id = link_id; __entry->key_index = key_index; __entry->pairwise = pairwise; ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key_index: %u, pairwise: %s, mac addr: " MAC_PR_FMT, - WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index, - BOOL_TO_STR(__entry->pairwise), MAC_PR_ARG(mac_addr)) + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " + "key_index: %u, pairwise: %s, mac addr: " MAC_PR_FMT, + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id, + __entry->key_index, BOOL_TO_STR(__entry->pairwise), + MAC_PR_ARG(mac_addr)) ); DEFINE_EVENT(key_handle, rdev_get_key, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, - bool pairwise, const u8 *mac_addr), - TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr) + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, + u8 key_index, bool pairwise, const u8 *mac_addr), + TP_ARGS(wiphy, netdev, link_id, key_index, pairwise, mac_addr) ); DEFINE_EVENT(key_handle, rdev_del_key, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, - bool pairwise, const u8 *mac_addr), - TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr) + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, + u8 key_index, bool pairwise, const u8 *mac_addr), + TP_ARGS(wiphy, netdev, link_id, key_index, pairwise, mac_addr) ); TRACE_EVENT(rdev_add_key, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, - bool pairwise, const u8 *mac_addr, u8 mode), - TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr, mode), + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, + u8 key_index, bool pairwise, const u8 *mac_addr, u8 mode), + TP_ARGS(wiphy, netdev, link_id, key_index, pairwise, mac_addr, mode), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(mac_addr) + __field(int, link_id) __field(u8, key_index) __field(bool, pairwise) __field(u8, mode) @@ -484,24 +489,27 @@ TRACE_EVENT(rdev_add_key, WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(mac_addr, mac_addr); + __entry->link_id = link_id; __entry->key_index = key_index; __entry->pairwise = pairwise; __entry->mode = mode; ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key_index: %u, " - "mode: %u, pairwise: %s, mac addr: " MAC_PR_FMT, - WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index, - __entry->mode, BOOL_TO_STR(__entry->pairwise), - MAC_PR_ARG(mac_addr)) + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " + "key_index: %u, mode: %u, 
pairwise: %s, " + "mac addr: " MAC_PR_FMT, + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id, + __entry->key_index, __entry->mode, + BOOL_TO_STR(__entry->pairwise), MAC_PR_ARG(mac_addr)) ); TRACE_EVENT(rdev_set_default_key, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, - bool unicast, bool multicast), - TP_ARGS(wiphy, netdev, key_index, unicast, multicast), + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, + u8 key_index, bool unicast, bool multicast), + TP_ARGS(wiphy, netdev, link_id, key_index, unicast, multicast), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY + __field(int, link_id) __field(u8, key_index) __field(bool, unicast) __field(bool, multicast) @@ -509,48 +517,58 @@ TRACE_EVENT(rdev_set_default_key, TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; + __entry->link_id = link_id; __entry->key_index = key_index; __entry->unicast = unicast; __entry->multicast = multicast; ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u, unicast: %s, multicast: %s", - WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index, - BOOL_TO_STR(__entry->unicast), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " + "key index: %u, unicast: %s, multicast: %s", + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id, + __entry->key_index, BOOL_TO_STR(__entry->unicast), BOOL_TO_STR(__entry->multicast)) ); TRACE_EVENT(rdev_set_default_mgmt_key, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index), - TP_ARGS(wiphy, netdev, key_index), + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, + u8 key_index), + TP_ARGS(wiphy, netdev, link_id, key_index), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY + __field(int, link_id) __field(u8, key_index) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; + __entry->link_id = link_id; __entry->key_index = key_index; ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u", - WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index) + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " + "key index: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, + __entry->link_id, __entry->key_index) ); TRACE_EVENT(rdev_set_default_beacon_key, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index), - TP_ARGS(wiphy, netdev, key_index), + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, + u8 key_index), + TP_ARGS(wiphy, netdev, link_id, key_index), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY + __field(int, link_id) __field(u8, key_index) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; + __entry->link_id = link_id; __entry->key_index = key_index; ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u", - WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index) + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " + "key index: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, + __entry->link_id, __entry->key_index) ); TRACE_EVENT(rdev_start_ap, @@ -3245,18 +3263,21 @@ TRACE_EVENT(cfg80211_ch_switch_notify, TRACE_EVENT(cfg80211_ch_switch_started_notify, TP_PROTO(struct net_device *netdev, - struct cfg80211_chan_def *chandef), - TP_ARGS(netdev, chandef), + struct cfg80211_chan_def *chandef, + unsigned int link_id), + TP_ARGS(netdev, chandef, link_id), TP_STRUCT__entry( NETDEV_ENTRY CHAN_DEF_ENTRY + __field(unsigned int, link_id) ), TP_fast_assign( NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); + __entry->link_id = link_id; ), - TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT, - NETDEV_PR_ARG, CHAN_DEF_PR_ARG) + TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d", + NETDEV_PR_ARG, CHAN_DEF_PR_ARG, 
__entry->link_id) ); TRACE_EVENT(cfg80211_radar_event, diff --git a/net/wireless/util.c b/net/wireless/util.c index 2c127951764a..0b28d00ba8f5 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -935,13 +935,13 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev) for (i = 0; i < CFG80211_MAX_WEP_KEYS; i++) { if (!wdev->connect_keys->params[i].cipher) continue; - if (rdev_add_key(rdev, dev, i, false, NULL, + if (rdev_add_key(rdev, dev, -1, i, false, NULL, &wdev->connect_keys->params[i])) { netdev_err(dev, "failed to set key %d\n", i); continue; } if (wdev->connect_keys->def == i && - rdev_set_default_key(rdev, dev, i, true, true)) { + rdev_set_default_key(rdev, dev, -1, i, true, true)) { netdev_err(dev, "failed to set defkey %d\n", i); continue; } diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index a9767bfe7330..ddf340bfa07a 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -470,7 +470,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) err = -ENOENT; else - err = rdev_del_key(rdev, dev, idx, pairwise, + err = rdev_del_key(rdev, dev, -1, idx, pairwise, addr); } wdev->wext.connect.privacy = false; @@ -509,7 +509,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, if (wdev->connected || (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->u.ibss.current_bss)) - err = rdev_add_key(rdev, dev, idx, pairwise, addr, params); + err = rdev_add_key(rdev, dev, -1, idx, pairwise, addr, params); else if (params->cipher != WLAN_CIPHER_SUITE_WEP40 && params->cipher != WLAN_CIPHER_SUITE_WEP104) return -EINVAL; @@ -546,7 +546,8 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, __cfg80211_leave_ibss(rdev, wdev->netdev, true); rejoin = true; } - err = rdev_set_default_key(rdev, dev, idx, true, true); + err = rdev_set_default_key(rdev, dev, -1, idx, true, + true); } if (!err) { wdev->wext.default_key = idx; @@ -561,7 +562,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, if (wdev->connected || (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->u.ibss.current_bss)) - err = rdev_set_default_mgmt_key(rdev, dev, idx); + err = rdev_set_default_mgmt_key(rdev, dev, -1, idx); if (!err) wdev->wext.default_mgmt_key = idx; return err; @@ -632,7 +633,7 @@ static int cfg80211_wext_siwencode(struct net_device *dev, if (wdev->connected || (wdev->iftype == NL80211_IFTYPE_ADHOC && wdev->u.ibss.current_bss)) - err = rdev_set_default_key(rdev, dev, idx, true, + err = rdev_set_default_key(rdev, dev, -1, idx, true, true); if (!err) wdev->wext.default_key = idx; @@ -685,6 +686,13 @@ static int cfg80211_wext_siwencodeext(struct net_device *dev, !rdev->ops->set_default_key) return -EOPNOTSUPP; + wdev_lock(wdev); + if (wdev->valid_links) { + wdev_unlock(wdev); + return -EOPNOTSUPP; + } + wdev_unlock(wdev); + switch (ext->alg) { case IW_ENCODE_ALG_NONE: remove = true; diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index f70112176b7c..a71a8c6edf55 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -379,6 +379,16 @@ static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map) static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map) { + if (!pool->unaligned) { + u32 i; + + for (i = 0; i < pool->heads_cnt; i++) { + struct xdp_buff_xsk *xskb = &pool->heads[i]; + + xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr); + } + } + 
pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL); if (!pool->dma_pages) return -ENOMEM; @@ -428,12 +438,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, if (pool->unaligned) xp_check_dma_contiguity(dma_map); - else - for (i = 0; i < pool->heads_cnt; i++) { - struct xdp_buff_xsk *xskb = &pool->heads[i]; - - xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr); - } err = xp_init_dma_info(pool, dma_map); if (err) { diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index 82d14eea1b5a..974eb97b77d2 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -168,7 +168,7 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb) { struct espintcp_ctx *ctx = espintcp_getctx(sk); - if (skb_queue_len(&ctx->out_queue) >= netdev_max_backlog) + if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog)) return -ENOBUFS; __skb_queue_tail(&ctx->out_queue, skb); diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 144238a50f3d..b2f4ec9c537f 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -669,7 +669,6 @@ resume: x->curlft.bytes += skb->len; x->curlft.packets++; - x->curlft.use_time = ktime_get_real_seconds(); spin_unlock(&x->lock); @@ -783,7 +782,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb, trans = this_cpu_ptr(&xfrm_trans_tasklet); - if (skb_queue_len(&trans->queue) >= netdev_max_backlog) + if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog)) return -ENOBUFS; BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb)); diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 555ab35cd119..9a5e79a38c67 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -534,7 +534,6 @@ static int xfrm_output_one(struct sk_buff *skb, int err) x->curlft.bytes += skb->len; x->curlft.packets++; - x->curlft.use_time = ktime_get_real_seconds(); spin_unlock_bh(&x->lock); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f1a0bab920a5..cc6ab79609e2 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -3162,7 +3162,7 @@ ok: return dst; nopol: - if (!(dst_orig->dev->flags & IFF_LOOPBACK) && + if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) && net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) { err = -EPERM; goto error; @@ -3599,6 +3599,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, if (pols[1]) { if (IS_ERR(pols[1])) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); + xfrm_pol_put(pols[0]); return 0; } pols[1]->curlft.use_time = ktime_get_real_seconds(); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 52e60e607f8a..91c32a3b6924 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1592,6 +1592,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, x->replay = orig->replay; x->preplay = orig->preplay; x->mapping_maxage = orig->mapping_maxage; + x->lastused = orig->lastused; x->new_mapping = 0; x->new_mapping_sport = 0; |
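A minimal illustrative sketch (not taken from any patch above) of the READ_ONCE() pattern that several hunks in this series apply to sysctl-backed globals: dev_tx_weight in __qdisc_run(), sysctl_somaxconn in __sys_listen(), and netdev_max_backlog in espintcp_queue_out() and xfrm_trans_queue_net(). The tunable foo_sysctl_limit and the helper below are hypothetical; the writer side is assumed to pair these loads with WRITE_ONCE() or a standard proc handler.

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/types.h>	/* bool */

/* Hypothetical tunable standing in for dev_tx_weight, somaxconn or
 * netdev_max_backlog; assumed to be updated concurrently by a sysctl
 * handler that uses WRITE_ONCE() on the store side.
 */
static int foo_sysctl_limit = 64;

static bool foo_over_limit(int queued)
{
	/* Marked load: the tunable can change at any time with no lock
	 * held on this fast path, and READ_ONCE() keeps the compiler
	 * from tearing or re-reading the value within this function.
	 */
	int limit = READ_ONCE(foo_sysctl_limit);

	return queued >= limit;
}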