From 119363c7dc2bcc0c33c255a7b4979c8c0fdc1896 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 22 Apr 2013 16:29:30 +0200 Subject: cfg80211: add support for per-chain signal strength reporting Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 14 ++++++++++++++ include/uapi/linux/nl80211.h | 6 ++++++ 2 files changed, 20 insertions(+) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 26b5b692c22b..87f7e1d060ab 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -753,6 +753,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @STATION_INFO_LOCAL_PM: @local_pm filled * @STATION_INFO_PEER_PM: @peer_pm filled * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled + * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled + * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled */ enum station_info_flags { STATION_INFO_INACTIVE_TIME = 1<<0, @@ -781,6 +783,8 @@ enum station_info_flags { STATION_INFO_NONPEER_PM = 1<<23, STATION_INFO_RX_BYTES64 = 1<<24, STATION_INFO_TX_BYTES64 = 1<<25, + STATION_INFO_CHAIN_SIGNAL = 1<<26, + STATION_INFO_CHAIN_SIGNAL_AVG = 1<<27, }; /** @@ -857,6 +861,8 @@ struct sta_bss_parameters { u16 beacon_interval; }; +#define IEEE80211_MAX_CHAINS 4 + /** * struct station_info - station information * @@ -874,6 +880,9 @@ struct sta_bss_parameters { * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. * @signal_avg: Average signal strength, type depends on the wiphy's signal_type. * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. + * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg + * @chain_signal: per-chain signal strength of last received packet in dBm + * @chain_signal_avg: per-chain signal strength average in dBm * @txrate: current unicast bitrate from this station * @rxrate: current unicast bitrate to this station * @rx_packets: packets received from this station @@ -909,6 +918,11 @@ struct station_info { u8 plink_state; s8 signal; s8 signal_avg; + + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; + s8 chain_signal_avg[IEEE80211_MAX_CHAINS]; + struct rate_info txrate; struct rate_info rxrate; u32 rx_packets; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index d1e48b5e348f..cbf1e228650f 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1991,6 +1991,10 @@ enum nl80211_sta_bss_param { * @NL80211_STA_INFO_PEER_PM: peer mesh STA link-specific power mode * @NL80211_STA_INFO_NONPEER_PM: neighbor mesh STA power save mode towards * non-peer STA + * @NL80211_STA_INFO_CHAIN_SIGNAL: per-chain signal strength of last PPDU + * Contains a nested array of signal strength attributes (u8, dBm) + * @NL80211_STA_INFO_CHAIN_SIGNAL_AVG: per-chain signal strength average + * Same format as NL80211_STA_INFO_CHAIN_SIGNAL. 
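Illustration only, not part of the patch: a full-MAC cfg80211 driver with two receive chains could report the new per-chain values from its .get_station() callback roughly as below. rssi_chain[] stands in for whatever per-chain RSSI the hardware provides, and <net/cfg80211.h> is assumed to be included.

/*
 * Sketch: fill the new station_info per-chain fields for a 2-chain device.
 */
static void example_fill_chain_signal(struct station_info *sinfo,
				      const s8 *rssi_chain)
{
	int i;

	sinfo->chains = BIT(0) | BIT(1);	/* chains 0 and 1 are valid */
	for (i = 0; i < 2; i++) {
		sinfo->chain_signal[i] = rssi_chain[i];		/* dBm, last frame */
		sinfo->chain_signal_avg[i] = rssi_chain[i];	/* dBm, running average */
	}
	sinfo->filled |= STATION_INFO_CHAIN_SIGNAL |
			 STATION_INFO_CHAIN_SIGNAL_AVG;
}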
* @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -2020,6 +2024,8 @@ enum nl80211_sta_info { NL80211_STA_INFO_NONPEER_PM, NL80211_STA_INFO_RX_BYTES64, NL80211_STA_INFO_TX_BYTES64, + NL80211_STA_INFO_CHAIN_SIGNAL, + NL80211_STA_INFO_CHAIN_SIGNAL_AVG, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, -- cgit v1.2.3 From ef0621e805f9ef76eaf31ce6205028fe467e9ca9 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 22 Apr 2013 16:29:31 +0200 Subject: mac80211: add support for per-chain signal strength reporting Signed-off-by: Felix Fietkau [fix unit documentation] Signed-off-by: Johannes Berg --- include/net/mac80211.h | 6 ++++++ net/mac80211/cfg.c | 13 ++++++++++++- net/mac80211/rx.c | 14 ++++++++++++++ net/mac80211/sta_info.c | 2 ++ net/mac80211/sta_info.h | 5 +++++ 5 files changed, 39 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 04c2d4670dc6..5953f25e63f5 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -850,6 +850,10 @@ enum mac80211_rx_flags { * @signal: signal strength when receiving this frame, either in dBm, in dB or * unspecified depending on the hardware capabilities flags * @IEEE80211_HW_SIGNAL_* + * @chains: bitmask of receive chains for which separate signal strength + * values were filled. + * @chain_signal: per-chain signal strength, in dBm (unlike @signal, doesn't + * support dB or unspecified units) * @antenna: antenna used * @rate_idx: index of data rate into band's supported rates or MCS index if * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) @@ -881,6 +885,8 @@ struct ieee80211_rx_status { u8 band; u8 antenna; s8 signal; + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 ampdu_delimiter_crc; u8 vendor_radiotap_align; u8 vendor_radiotap_oui[3]; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 1a89c80e6407..1f51bdfe574a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -444,7 +444,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) struct ieee80211_local *local = sdata->local; struct timespec uptime; u64 packets = 0; - int ac; + int i, ac; sinfo->generation = sdata->local->sta_generation; @@ -488,6 +488,17 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->signal = (s8)sta->last_signal; sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal); } + if (sta->chains) { + sinfo->filled |= STATION_INFO_CHAIN_SIGNAL | + STATION_INFO_CHAIN_SIGNAL_AVG; + + sinfo->chains = sta->chains; + for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { + sinfo->chain_signal[i] = sta->chain_signal_last[i]; + sinfo->chain_signal_avg[i] = + (s8) -ewma_read(&sta->chain_signal_avg[i]); + } + } sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate); sta_set_rate_info_rx(sta, &sinfo->rxrate); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c8447af76ead..22e412b0767f 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1372,6 +1372,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int i; if (!sta) return RX_CONTINUE; @@ -1422,6 +1423,19 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) ewma_add(&sta->avg_signal, -status->signal); } + if (status->chains) { + sta->chains = status->chains; + for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { + int signal = 
status->chain_signal[i]; + + if (!(status->chains & BIT(i))) + continue; + + sta->chain_signal_last[i] = signal; + ewma_add(&sta->chain_signal_avg[i], -signal); + } + } + /* * Change STA power saving mode only at the end of a frame * exchange sequence. diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 11216bc13b27..a04c5671d7fd 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -358,6 +358,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, do_posix_clock_monotonic_gettime(&uptime); sta->last_connected = uptime.tv_sec; ewma_init(&sta->avg_signal, 1024, 8); + for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++) + ewma_init(&sta->chain_signal_avg[i], 1024, 8); if (sta_prepare_rate_control(local, sta, gfp)) { kfree(sta); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index adc30045f99e..41c28b977f7c 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -344,6 +344,11 @@ struct sta_info { int last_signal; struct ewma avg_signal; int last_ack_signal; + + u8 chains; + s8 chain_signal_last[IEEE80211_MAX_CHAINS]; + struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS]; + /* Plus 1 for non-QoS frames */ __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1]; -- cgit v1.2.3 From fb4e156886ce6e8309e912d8b370d192330d19d3 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Sun, 28 Apr 2013 16:22:06 -0700 Subject: nl80211: Add generic netlink module alias for cfg80211/nl80211 To support auto-loading of wireless modules from netlink users, add module alias for nl80211 family. This also adds NL80211_GENL_NAME constant to define the "nl80211" netlink family name as part of uapi. Signed-off-by: Marcel Holtmann Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 2 ++ net/wireless/core.c | 1 + net/wireless/nl80211.c | 8 ++++---- 3 files changed, 7 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index cbf1e228650f..b48430769eda 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -27,6 +27,8 @@ #include +#define NL80211_GENL_NAME "nl80211" + /** * DOC: Station handling * diff --git a/net/wireless/core.c b/net/wireless/core.c index 84c9ad7e1dca..68f0c96c0565 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -34,6 +34,7 @@ MODULE_AUTHOR("Johannes Berg"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("wireless configuration support"); +MODULE_ALIAS_GENL_FAMILY(NL80211_GENL_NAME); /* RCU-protected (and cfg80211_mutex for writers) */ LIST_HEAD(cfg80211_rdev_list); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f687a8d0d026..9cdcd9ec3317 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -37,10 +37,10 @@ static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, /* the netlink family */ static struct genl_family nl80211_fam = { - .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ - .name = "nl80211", /* have users key off the name instead */ - .hdrsize = 0, /* no private header */ - .version = 1, /* no particular meaning now */ + .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ + .name = NL80211_GENL_NAME, /* have users key off the name instead */ + .hdrsize = 0, /* no private header */ + .version = 1, /* no particular meaning now */ .maxattr = NL80211_ATTR_MAX, .netnsok = true, .pre_doit = nl80211_pre_doit, -- cgit v1.2.3 From 03f831a6f7dc02303df20718a4e96821e43b3d0c Mon Sep 17 00:00:00 2001 From: "Robert P. J. 
Day" Date: Thu, 2 May 2013 07:15:09 -0400 Subject: wireless: fix kerneldoc content in *80211.h files. Make kerneldoc content match header file content, no functional change. Signed-off-by: Robert P. J. Day Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 1 + include/net/mac80211.h | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 87f7e1d060ab..26e91138a2c6 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4167,6 +4167,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, * cfg80211_crit_proto_stopped() - indicate critical protocol stopped by driver. * * @wdev: the wireless device for which critical protocol is stopped. + * @gfp: allocation flags * * This function can be called by the driver to indicate it has reverted * operation back to normal. One reason could be that the duration given diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5953f25e63f5..c36535e7b291 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1241,7 +1241,7 @@ enum ieee80211_sta_rx_bandwidth { * struct ieee80211_sta_rates - station rate selection table * * @rcu_head: RCU head used for freeing the table on update - * @rates: transmit rates/flags to be used by default. + * @rate: transmit rates/flags to be used by default. * Overriding entries per-packet is possible by using cb tx control. */ struct ieee80211_sta_rates { @@ -1282,7 +1282,7 @@ struct ieee80211_sta_rates { * notifications and capabilities. The value is only valid after * the station moves to associated state. * @smps_mode: current SMPS mode (off, static or dynamic) - * @tx_rates: rate control selection table + * @rates: rate control selection table */ struct ieee80211_sta { u32 supp_rates[IEEE80211_NUM_BANDS]; -- cgit v1.2.3 From 4325f6caad98c075b39f0eaaac6693a0dd43f646 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 8 May 2013 13:09:08 +0200 Subject: wireless: move crypto constants to ieee80211.h mac80211 and the Intel drivers all define crypto constants, move them to ieee80211.h instead. 
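As a sketch of how the shared definitions end up being used (illustrative, not taken from the patch; the WLAN_CIPHER_SUITE_* values already exist in the tree), a driver can now compute per-frame security overhead without carrying private copies of these sizes:

static unsigned int example_crypto_overhead(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		return IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;	/* 4 + 4 */
	case WLAN_CIPHER_SUITE_TKIP:
		return IEEE80211_TKIP_IV_LEN + IEEE80211_TKIP_ICV_LEN;	/* 8 + 4 */
	case WLAN_CIPHER_SUITE_CCMP:
		return IEEE80211_CCMP_HDR_LEN + IEEE80211_CCMP_MIC_LEN;	/* 8 + 8 */
	default:
		return 0;
	}
}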
Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlegacy/commands.h | 8 ---- drivers/net/wireless/iwlwifi/dvm/commands.h | 8 ---- drivers/net/wireless/iwlwifi/pcie/tx.c | 6 +-- include/linux/ieee80211.h | 9 ++++ net/mac80211/aes_ccm.c | 6 +-- net/mac80211/key.c | 24 +++++----- net/mac80211/key.h | 15 +------ net/mac80211/rx.c | 12 ++--- net/mac80211/wep.c | 48 ++++++++++---------- net/mac80211/wpa.c | 68 +++++++++++++++-------------- 10 files changed, 96 insertions(+), 108 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h index 3b6c99400892..048421511988 100644 --- a/drivers/net/wireless/iwlegacy/commands.h +++ b/drivers/net/wireless/iwlegacy/commands.h @@ -1347,14 +1347,6 @@ struct il_rx_mpdu_res_start { #define TX_CMD_SEC_SHIFT 6 #define TX_CMD_SEC_KEY128 0x08 -/* - * security overhead sizes - */ -#define WEP_IV_LEN 4 -#define WEP_ICV_LEN 4 -#define CCMP_MIC_LEN 8 -#define TKIP_ICV_LEN 4 - /* * C_TX = 0x1c (command) */ diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h index 95ca026ecc9d..174b0f169497 100644 --- a/drivers/net/wireless/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/iwlwifi/dvm/commands.h @@ -1224,14 +1224,6 @@ struct iwl_rx_mpdu_res_start { #define TX_CMD_SEC_SHIFT 6 #define TX_CMD_SEC_KEY128 0x08 -/* - * security overhead sizes - */ -#define WEP_IV_LEN 4 -#define WEP_ICV_LEN 4 -#define CCMP_MIC_LEN 8 -#define TKIP_ICV_LEN 4 - /* * REPLY_TX = 0x1c (command) */ diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index c5e30294c5ac..2878ee99a668 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -224,13 +224,13 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, switch (sec_ctl & TX_CMD_SEC_MSK) { case TX_CMD_SEC_CCM: - len += CCMP_MIC_LEN; + len += IEEE80211_CCMP_MIC_LEN; break; case TX_CMD_SEC_TKIP: - len += TKIP_ICV_LEN; + len += IEEE80211_TKIP_ICV_LEN; break; case TX_CMD_SEC_WEP: - len += WEP_IV_LEN + WEP_ICV_LEN; + len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; break; } diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 06b0ed0154a4..d826e5a84af0 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1829,6 +1829,15 @@ enum ieee80211_key_len { WLAN_KEY_LEN_AES_CMAC = 16, }; +#define IEEE80211_WEP_IV_LEN 4 +#define IEEE80211_WEP_ICV_LEN 4 +#define IEEE80211_CCMP_HDR_LEN 8 +#define IEEE80211_CCMP_MIC_LEN 8 +#define IEEE80211_CCMP_PN_LEN 6 +#define IEEE80211_TKIP_IV_LEN 8 +#define IEEE80211_TKIP_ICV_LEN 4 +#define IEEE80211_CMAC_PN_LEN 6 + /* Public action codes */ enum ieee80211_pub_actioncode { WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index 0785e95c9924..be7614b9ed27 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c @@ -85,7 +85,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, *cpos++ = *pos++ ^ e[i]; } - for (i = 0; i < CCMP_MIC_LEN; i++) + for (i = 0; i < IEEE80211_CCMP_MIC_LEN; i++) mic[i] = b[i] ^ s_0[i]; } @@ -123,7 +123,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, crypto_cipher_encrypt_one(tfm, a, a); } - for (i = 0; i < CCMP_MIC_LEN; i++) { + for (i = 0; i < IEEE80211_CCMP_MIC_LEN; i++) { if ((mic[i] ^ s_0[i]) != a[i]) return -1; } @@ -138,7 +138,7 @@ struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const 
u8 key[]) tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (!IS_ERR(tfm)) - crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN); + crypto_cipher_setkey(tfm, key, WLAN_KEY_LEN_CCMP); return tfm; } diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 67059b88fea5..e39cc91d0cf1 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -335,12 +335,12 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - key->conf.iv_len = WEP_IV_LEN; - key->conf.icv_len = WEP_ICV_LEN; + key->conf.iv_len = IEEE80211_WEP_IV_LEN; + key->conf.icv_len = IEEE80211_WEP_ICV_LEN; break; case WLAN_CIPHER_SUITE_TKIP: - key->conf.iv_len = TKIP_IV_LEN; - key->conf.icv_len = TKIP_ICV_LEN; + key->conf.iv_len = IEEE80211_TKIP_IV_LEN; + key->conf.icv_len = IEEE80211_TKIP_ICV_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) { key->u.tkip.rx[i].iv32 = @@ -352,13 +352,13 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, spin_lock_init(&key->u.tkip.txlock); break; case WLAN_CIPHER_SUITE_CCMP: - key->conf.iv_len = CCMP_HDR_LEN; - key->conf.icv_len = CCMP_MIC_LEN; + key->conf.iv_len = IEEE80211_CCMP_HDR_LEN; + key->conf.icv_len = IEEE80211_CCMP_MIC_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) - for (j = 0; j < CCMP_PN_LEN; j++) + for (j = 0; j < IEEE80211_CCMP_PN_LEN; j++) key->u.ccmp.rx_pn[i][j] = - seq[CCMP_PN_LEN - j - 1]; + seq[IEEE80211_CCMP_PN_LEN - j - 1]; } /* * Initialize AES key state here as an optimization so that @@ -375,9 +375,9 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, key->conf.iv_len = 0; key->conf.icv_len = sizeof(struct ieee80211_mmie); if (seq) - for (j = 0; j < CMAC_PN_LEN; j++) + for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++) key->u.aes_cmac.rx_pn[j] = - seq[CMAC_PN_LEN - j - 1]; + seq[IEEE80211_CMAC_PN_LEN - j - 1]; /* * Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. @@ -740,13 +740,13 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.ccmp.rx_pn[tid]; - memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN); + memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; - memcpy(seq->aes_cmac.pn, pn, CMAC_PN_LEN); + memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN); break; } } diff --git a/net/mac80211/key.h b/net/mac80211/key.h index e8de3e6d7804..036d57e76a5e 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h @@ -19,17 +19,6 @@ #define NUM_DEFAULT_KEYS 4 #define NUM_DEFAULT_MGMT_KEYS 2 -#define WEP_IV_LEN 4 -#define WEP_ICV_LEN 4 -#define ALG_CCMP_KEY_LEN 16 -#define CCMP_HDR_LEN 8 -#define CCMP_MIC_LEN 8 -#define CCMP_TK_LEN 16 -#define CCMP_PN_LEN 6 -#define TKIP_IV_LEN 8 -#define TKIP_ICV_LEN 4 -#define CMAC_PN_LEN 6 - struct ieee80211_local; struct ieee80211_sub_if_data; struct sta_info; @@ -93,13 +82,13 @@ struct ieee80211_key { * frames and the last counter is used with Robust * Management frames. 
*/ - u8 rx_pn[IEEE80211_NUM_TIDS + 1][CCMP_PN_LEN]; + u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_CCMP_PN_LEN]; struct crypto_cipher *tfm; u32 replays; /* dot11RSNAStatsCCMPReplays */ } ccmp; struct { atomic64_t tx_pn; - u8 rx_pn[CMAC_PN_LEN]; + u8 rx_pn[IEEE80211_CMAC_PN_LEN]; struct crypto_cipher *tfm; u32 replays; /* dot11RSNAStatsCMACReplays */ u32 icverrors; /* dot11RSNAStatsCMACICVErrors */ diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 22e412b0767f..6e2c8c5236c4 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1622,7 +1622,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) entry->ccmp = 1; memcpy(entry->last_pn, rx->key->u.ccmp.rx_pn[queue], - CCMP_PN_LEN); + IEEE80211_CCMP_PN_LEN); } return RX_QUEUED; } @@ -1641,21 +1641,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) * (IEEE 802.11i, 8.3.3.4.5) */ if (entry->ccmp) { int i; - u8 pn[CCMP_PN_LEN], *rpn; + u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; int queue; if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP) return RX_DROP_UNUSABLE; - memcpy(pn, entry->last_pn, CCMP_PN_LEN); - for (i = CCMP_PN_LEN - 1; i >= 0; i--) { + memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); + for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { pn[i]++; if (pn[i]) break; } queue = rx->security_idx; rpn = rx->key->u.ccmp.rx_pn[queue]; - if (memcmp(pn, rpn, CCMP_PN_LEN)) + if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) return RX_DROP_UNUSABLE; - memcpy(entry->last_pn, pn, CCMP_PN_LEN); + memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); } skb_pull(rx->skb, ieee80211_hdrlen(fc)); diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index c04d401dae92..6ee2b5863572 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -28,7 +28,7 @@ int ieee80211_wep_init(struct ieee80211_local *local) { /* start WEP IV from a random value */ - get_random_bytes(&local->wep_iv, WEP_IV_LEN); + get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN); local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(local->wep_tx_tfm)) { @@ -98,20 +98,21 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); - if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN || - skb_headroom(skb) < WEP_IV_LEN)) + if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN || + skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) return NULL; hdrlen = ieee80211_hdrlen(hdr->frame_control); - newhdr = skb_push(skb, WEP_IV_LEN); - memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); + newhdr = skb_push(skb, IEEE80211_WEP_IV_LEN); + memmove(newhdr, newhdr + IEEE80211_WEP_IV_LEN, hdrlen); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return newhdr + hdrlen; - skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN); + skb_set_network_header(skb, skb_network_offset(skb) + + IEEE80211_WEP_IV_LEN); ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); return newhdr + hdrlen; } @@ -125,8 +126,8 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local, unsigned int hdrlen; hdrlen = ieee80211_hdrlen(hdr->frame_control); - memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); - skb_pull(skb, WEP_IV_LEN); + memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_WEP_IV_LEN); } @@ -146,7 +147,7 @@ int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key, put_unaligned(icv, (__le32 *)(data + data_len)); 
crypto_cipher_setkey(tfm, rc4key, klen); - for (i = 0; i < data_len + WEP_ICV_LEN; i++) + for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) crypto_cipher_encrypt_one(tfm, data + i, data + i); return 0; @@ -172,7 +173,7 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, if (!iv) return -1; - len = skb->len - (iv + WEP_IV_LEN - skb->data); + len = skb->len - (iv + IEEE80211_WEP_IV_LEN - skb->data); /* Prepend 24-bit IV to RC4 key */ memcpy(rc4key, iv, 3); @@ -181,10 +182,10 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, memcpy(rc4key + 3, key, keylen); /* Add room for ICV */ - skb_put(skb, WEP_ICV_LEN); + skb_put(skb, IEEE80211_WEP_ICV_LEN); return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, - iv + WEP_IV_LEN, len); + iv + IEEE80211_WEP_IV_LEN, len); } @@ -201,11 +202,11 @@ int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, return -1; crypto_cipher_setkey(tfm, rc4key, klen); - for (i = 0; i < data_len + WEP_ICV_LEN; i++) + for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) crypto_cipher_decrypt_one(tfm, data + i, data + i); crc = cpu_to_le32(~crc32_le(~0, data, data_len)); - if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0) + if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0) /* ICV mismatch */ return -1; @@ -237,10 +238,10 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local, return -1; hdrlen = ieee80211_hdrlen(hdr->frame_control); - if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN) + if (skb->len < hdrlen + IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN) return -1; - len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN; + len = skb->len - hdrlen - IEEE80211_WEP_IV_LEN - IEEE80211_WEP_ICV_LEN; keyidx = skb->data[hdrlen + 3] >> 6; @@ -256,16 +257,16 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local, memcpy(rc4key + 3, key->conf.key, key->conf.keylen); if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, - skb->data + hdrlen + WEP_IV_LEN, - len)) + skb->data + hdrlen + + IEEE80211_WEP_IV_LEN, len)) ret = -1; /* Trim ICV */ - skb_trim(skb, skb->len - WEP_ICV_LEN); + skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); /* Remove IV */ - memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); - skb_pull(skb, WEP_IV_LEN); + memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_WEP_IV_LEN); return ret; } @@ -305,13 +306,14 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) return RX_DROP_UNUSABLE; } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) { - if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + WEP_IV_LEN)) + if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + + IEEE80211_WEP_IV_LEN)) return RX_DROP_UNUSABLE; if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key)) rx->sta->wep_weak_iv_count++; ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); /* remove ICV */ - if (pskb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN)) + if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN)) return RX_DROP_UNUSABLE; } diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index c7c6d644486f..c9edfcb7a13b 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -62,10 +62,10 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) tail = MICHAEL_MIC_LEN; if (!info->control.hw_key) - tail += TKIP_ICV_LEN; + tail += IEEE80211_TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || - skb_headroom(skb) < TKIP_IV_LEN)) + skb_headroom(skb) < IEEE80211_TKIP_IV_LEN)) return TX_DROP; key = 
&tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; @@ -198,15 +198,16 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) if (info->control.hw_key) tail = 0; else - tail = TKIP_ICV_LEN; + tail = IEEE80211_TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || - skb_headroom(skb) < TKIP_IV_LEN)) + skb_headroom(skb) < IEEE80211_TKIP_IV_LEN)) return -1; - pos = skb_push(skb, TKIP_IV_LEN); - memmove(pos, pos + TKIP_IV_LEN, hdrlen); - skb_set_network_header(skb, skb_network_offset(skb) + TKIP_IV_LEN); + pos = skb_push(skb, IEEE80211_TKIP_IV_LEN); + memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen); + skb_set_network_header(skb, skb_network_offset(skb) + + IEEE80211_TKIP_IV_LEN); pos += hdrlen; /* the HW only needs room for the IV, but not the actual IV */ @@ -227,7 +228,7 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) return 0; /* Add room for ICV */ - skb_put(skb, TKIP_ICV_LEN); + skb_put(skb, IEEE80211_TKIP_ICV_LEN); return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, key, skb, pos, len); @@ -290,11 +291,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; /* Trim ICV */ - skb_trim(skb, skb->len - TKIP_ICV_LEN); + skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); /* Remove IV */ - memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); - skb_pull(skb, TKIP_IV_LEN); + memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_TKIP_IV_LEN); return RX_CONTINUE; } @@ -337,9 +338,9 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch, else qos_tid = 0; - data_len = skb->len - hdrlen - CCMP_HDR_LEN; + data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN; if (encrypted) - data_len -= CCMP_MIC_LEN; + data_len -= IEEE80211_CCMP_MIC_LEN; /* First block, b_0 */ b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ @@ -348,7 +349,7 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch, */ b_0[1] = qos_tid | (mgmt << 4); memcpy(&b_0[2], hdr->addr2, ETH_ALEN); - memcpy(&b_0[8], pn, CCMP_PN_LEN); + memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN); /* l(m) */ put_unaligned_be16(data_len, &b_0[14]); @@ -424,15 +425,16 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) if (info->control.hw_key) tail = 0; else - tail = CCMP_MIC_LEN; + tail = IEEE80211_CCMP_MIC_LEN; if (WARN_ON(skb_tailroom(skb) < tail || - skb_headroom(skb) < CCMP_HDR_LEN)) + skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN)) return -1; - pos = skb_push(skb, CCMP_HDR_LEN); - memmove(pos, pos + CCMP_HDR_LEN, hdrlen); - skb_set_network_header(skb, skb_network_offset(skb) + CCMP_HDR_LEN); + pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN); + memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen); + skb_set_network_header(skb, skb_network_offset(skb) + + IEEE80211_CCMP_HDR_LEN); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && @@ -457,10 +459,10 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) if (info->control.hw_key) return 0; - pos += CCMP_HDR_LEN; + pos += IEEE80211_CCMP_HDR_LEN; ccmp_special_blocks(skb, pn, scratch, 0); ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, scratch, pos, len, - pos, skb_put(skb, CCMP_MIC_LEN)); + pos, skb_put(skb, IEEE80211_CCMP_MIC_LEN)); return 0; } @@ -490,7 +492,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = 
IEEE80211_SKB_RXCB(skb); - u8 pn[CCMP_PN_LEN]; + u8 pn[IEEE80211_CCMP_PN_LEN]; int data_len; int queue; @@ -500,12 +502,13 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) !ieee80211_is_robust_mgmt_frame(hdr)) return RX_CONTINUE; - data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; + data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - + IEEE80211_CCMP_MIC_LEN; if (!rx->sta || data_len < 0) return RX_DROP_UNUSABLE; if (status->flag & RX_FLAG_DECRYPTED) { - if (!pskb_may_pull(rx->skb, hdrlen + CCMP_HDR_LEN)) + if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; } else { if (skb_linearize(rx->skb)) @@ -516,7 +519,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) queue = rx->security_idx; - if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) { + if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) { key->u.ccmp.replays++; return RX_DROP_UNUSABLE; } @@ -528,19 +531,20 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) if (ieee80211_aes_ccm_decrypt( key->u.ccmp.tfm, scratch, - skb->data + hdrlen + CCMP_HDR_LEN, data_len, - skb->data + skb->len - CCMP_MIC_LEN, - skb->data + hdrlen + CCMP_HDR_LEN)) + skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN, + data_len, + skb->data + skb->len - IEEE80211_CCMP_MIC_LEN, + skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; } - memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN); + memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); /* Remove CCMP header and MIC */ - if (pskb_trim(skb, skb->len - CCMP_MIC_LEN)) + if (pskb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN)) return RX_DROP_UNUSABLE; - memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); - skb_pull(skb, CCMP_HDR_LEN); + memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_CCMP_HDR_LEN); return RX_CONTINUE; } -- cgit v1.2.3 From 6e16d90b5218307db805e6b3e0b06d3946eb8c4c Mon Sep 17 00:00:00 2001 From: Colleen Twitty Date: Wed, 8 May 2013 11:45:59 -0700 Subject: cfg80211: Userspace may inform kernel of mesh auth method. Authentication takes place in userspace, but the beacon is generated in the kernel. Allow userspace to inform the kernel of the authentication method so the appropriate mesh config IE can be set prior to beacon generation when joining the MBSS. 
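For illustration only (not part of this patch): a userspace mesh peering manager built on libnl-3 could advertise SAE (0x1) through the new attribute roughly as below. The nesting under NL80211_ATTR_MESH_SETUP and resolving the family by NL80211_GENL_NAME follow existing nl80211 usage; socket setup, error handling and the remaining join parameters are omitted.

#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int example_join_mesh_sae(struct nl_sock *sk, int ifindex)
{
	int family = genl_ctrl_resolve(sk, NL80211_GENL_NAME);
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *setup;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_JOIN_MESH, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_MESH_ID, 4, "mesh");

	setup = nla_nest_start(msg, NL80211_ATTR_MESH_SETUP);
	nla_put_flag(msg, NL80211_MESH_SETUP_USERSPACE_MPM);	/* auth requires user MPM */
	nla_put_u8(msg, NL80211_MESH_SETUP_AUTH_PROTOCOL, 0x1);	/* SAE */
	nla_nest_end(msg, setup);

	return nl_send_auto(sk, msg);
}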
Signed-off-by: Colleen Twitty Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ include/uapi/linux/nl80211.h | 5 +++++ net/wireless/mesh.c | 1 + net/wireless/nl80211.c | 8 ++++++++ 4 files changed, 16 insertions(+) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 26e91138a2c6..32a2f1b20861 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1161,6 +1161,7 @@ struct mesh_config { * @sync_method: which synchronization method to use * @path_sel_proto: which path selection protocol to use * @path_metric: which metric to use + * @auth_id: which authentication method this mesh is using * @ie: vendor information elements (optional) * @ie_len: length of vendor information elements * @is_authenticated: this mesh requires authentication @@ -1179,6 +1180,7 @@ struct mesh_setup { u8 sync_method; u8 path_sel_proto; u8 path_metric; + u8 auth_id; const u8 *ie; u8 ie_len; bool is_authenticated; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index b48430769eda..06320713e9c9 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2645,6 +2645,10 @@ enum nl80211_meshconf_params { * @NL80211_MESH_SETUP_USERSPACE_MPM: Enable this option if userspace will * implement an MPM which handles peer allocation and state. * + * @NL80211_MESH_SETUP_AUTH_PROTOCOL: Inform the kernel of the authentication + * method (u8, as defined in IEEE 8.4.2.100.6, e.g. 0x1 for SAE). + * Default is no authentication method required. + * * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number * * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use @@ -2658,6 +2662,7 @@ enum nl80211_mesh_setup_params { NL80211_MESH_SETUP_USERSPACE_AMPE, NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC, NL80211_MESH_SETUP_USERSPACE_MPM, + NL80211_MESH_SETUP_AUTH_PROTOCOL, /* keep last */ __NL80211_MESH_SETUP_ATTR_AFTER_LAST, diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 0bb93f3061a4..9546ad210550 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -82,6 +82,7 @@ const struct mesh_setup default_mesh_setup = { .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, .path_metric = IEEE80211_PATH_METRIC_AIRTIME, + .auth_id = 0, /* open */ .ie = NULL, .ie_len = 0, .is_secure = false, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 9cdcd9ec3317..5f10f7acfa06 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4672,6 +4672,7 @@ static const struct nla_policy [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, + [NL80211_MESH_SETUP_AUTH_PROTOCOL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG }, [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, @@ -4857,6 +4858,13 @@ static int nl80211_parse_mesh_setup(struct genl_info *info, if (setup->is_secure) setup->user_mpm = true; + if (tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]) { + if (!setup->user_mpm) + return -EINVAL; + setup->auth_id = + nla_get_u8(tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]); + } + return 0; } -- cgit v1.2.3 From caeaba79009c2ee858c3b2bf8caf922cd719fead Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 16 May 2013 22:32:00 +0000 Subject: ipv6: add support of peer address This patch adds the support of peer address for IPv6. 
For example, it is possible to specify the remote end of a 6inY tunnel. This was already possible in IPv4: ip addr add ip1 peer ip2 dev dev1 The peer address is specified with IFA_ADDRESS and the local address with IFA_LOCAL (like explained in include/uapi/linux/if_addr.h). Note that the API is not changed, because before this patch, it was not possible to specify two different addresses in IFA_LOCAL and IFA_REMOTE. There is a small change for the dump: if the peer is different from ::, IFA_ADDRESS will contain the peer address instead of the local address. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/net/if_inet6.h | 1 + net/ipv6/addrconf.c | 64 ++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 48 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 100fb8cec17c..0727d0e4af3b 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -74,6 +74,7 @@ struct inet6_ifaddr { bool tokenized; struct rcu_head rcu; + struct in6_addr peer_addr; }; struct ip6_sf_socklist { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d1ab6ab29a55..d684d23bc027 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2402,6 +2402,7 @@ err_exit: * Manual configuration of address on an interface */ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx, + const struct in6_addr *peer_pfx, unsigned int plen, __u8 ifa_flags, __u32 prefered_lft, __u32 valid_lft) { @@ -2457,6 +2458,8 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p ifp->valid_lft = valid_lft; ifp->prefered_lft = prefered_lft; ifp->tstamp = jiffies; + if (peer_pfx) + ifp->peer_addr = *peer_pfx; spin_unlock_bh(&ifp->lock); addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev, @@ -2526,7 +2529,7 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg) return -EFAULT; rtnl_lock(); - err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, + err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, NULL, ireq.ifr6_prefixlen, IFA_F_PERMANENT, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); rtnl_unlock(); @@ -3610,18 +3613,20 @@ restart: rcu_read_unlock_bh(); } -static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) +static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local, + struct in6_addr **peer_pfx) { struct in6_addr *pfx = NULL; + *peer_pfx = NULL; + if (addr) pfx = nla_data(addr); if (local) { if (pfx && nla_memcmp(local, pfx, sizeof(*pfx))) - pfx = NULL; - else - pfx = nla_data(local); + *peer_pfx = pfx; + pfx = nla_data(local); } return pfx; @@ -3639,7 +3644,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) struct net *net = sock_net(skb->sk); struct ifaddrmsg *ifm; struct nlattr *tb[IFA_MAX+1]; - struct in6_addr *pfx; + struct in6_addr *pfx, *peer_pfx; int err; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); @@ -3647,7 +3652,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) return err; ifm = nlmsg_data(nlh); - pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]); + pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx); if (pfx == NULL) return -EINVAL; @@ -3705,7 +3710,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) struct net *net = sock_net(skb->sk); struct ifaddrmsg *ifm; struct nlattr *tb[IFA_MAX+1]; - struct in6_addr *pfx; + struct in6_addr *pfx, *peer_pfx; struct inet6_ifaddr *ifa; struct net_device *dev; u32 
valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME; @@ -3717,7 +3722,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) return err; ifm = nlmsg_data(nlh); - pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]); + pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx); if (pfx == NULL) return -EINVAL; @@ -3745,7 +3750,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) * It would be best to check for !NLM_F_CREATE here but * userspace alreay relies on not having to provide this. */ - return inet6_addr_add(net, ifm->ifa_index, pfx, + return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx, ifm->ifa_prefixlen, ifa_flags, preferred_lft, valid_lft); } @@ -3802,6 +3807,7 @@ static inline int rt_scope(int ifa_scope) static inline int inet6_ifaddr_msgsize(void) { return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + + nla_total_size(16) /* IFA_LOCAL */ + nla_total_size(16) /* IFA_ADDRESS */ + nla_total_size(sizeof(struct ifa_cacheinfo)); } @@ -3840,13 +3846,22 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, valid = INFINITY_LIFE_TIME; } - if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0 || - put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) { - nlmsg_cancel(skb, nlh); - return -EMSGSIZE; - } + if (ipv6_addr_type(&ifa->peer_addr) != IPV6_ADDR_ANY) { + if (nla_put(skb, IFA_LOCAL, 16, &ifa->addr) < 0 || + nla_put(skb, IFA_ADDRESS, 16, &ifa->peer_addr) < 0) + goto error; + } else + if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0) + goto error; + + if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) + goto error; return nlmsg_end(skb, nlh); + +error: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; } static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, @@ -4046,7 +4061,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh) struct net *net = sock_net(in_skb->sk); struct ifaddrmsg *ifm; struct nlattr *tb[IFA_MAX+1]; - struct in6_addr *addr = NULL; + struct in6_addr *addr = NULL, *peer; struct net_device *dev = NULL; struct inet6_ifaddr *ifa; struct sk_buff *skb; @@ -4056,7 +4071,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh) if (err < 0) goto errout; - addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL]); + addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer); if (addr == NULL) { err = -EINVAL; goto errout; @@ -4564,11 +4579,26 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) ip6_ins_rt(ifp->rt); if (ifp->idev->cnf.forwarding) addrconf_join_anycast(ifp); + if (ipv6_addr_type(&ifp->peer_addr) != IPV6_ADDR_ANY) + addrconf_prefix_route(&ifp->peer_addr, 128, + ifp->idev->dev, 0, 0); break; case RTM_DELADDR: if (ifp->idev->cnf.forwarding) addrconf_leave_anycast(ifp); addrconf_leave_solict(ifp->idev, &ifp->addr); + if (ipv6_addr_type(&ifp->peer_addr) != IPV6_ADDR_ANY) { + struct rt6_info *rt; + struct net_device *dev = ifp->idev->dev; + + rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL, + dev->ifindex, 1); + if (rt) { + dst_hold(&rt->dst); + if (ip6_del_rt(rt)) + dst_free(&rt->dst); + } + } dst_hold(&ifp->rt->dst); if (ip6_del_rt(ifp->rt)) -- cgit v1.2.3 From 3e59cb0ddfd2c59991f38e89352ad8a3c71b2374 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Fri, 17 May 2013 13:45:05 +0000 Subject: tcp: remove bad timeout logic in fast recovery tcp_timeout_skb() was intended to trigger fast recovery on timeout, unfortunately in reality it often causes spurious retransmission storms during fast 
recovery. The particular sign is a fast retransmit over the highest sacked sequence (SND.FACK). Currently the RTO timer re-arming (as in RFC6298) offers a nice cushion to avoid spurious timeout: when SND.UNA advances the sender re-arms RTO and extends the timeout by icsk_rto. The sender does not offset the time elapsed since the packet at SND.UNA was sent. But if the next (DUP)ACK arrives later than ~RTTVAR and triggers tcp_fastretrans_alert(), then tcp_timeout_skb() will mark any packet sent before the icsk_rto interval lost, including one that's above the highest sacked sequence. Most likely a large part of scorebard will be marked. If most packets are not lost then the subsequent DUPACKs with new SACK blocks will cause the sender to continue to retransmit packets beyond SND.FACK spuriously. Even if only one packet is lost the sender may falsely retransmit almost the entire window. The situation becomes common in the world of bufferbloat: the RTT continues to grow as the queue builds up but RTTVAR remains small and close to the minimum 200ms. If a data packet is lost and the DUPACK triggered by the next data packet is slightly delayed, then a spurious retransmission storm forms. As the original comment on tcp_timeout_skb() suggests: the usefulness of this feature is questionable. It also wastes cycles walking the sack scoreboard and is actually harmful because of false recovery. It's time to remove this. Signed-off-by: Yuchung Cheng Acked-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Nandita Dukkipati Signed-off-by: David S. Miller --- include/linux/tcp.h | 1 - include/net/tcp.h | 1 - net/ipv4/tcp_input.c | 65 +--------------------------------------------------- 3 files changed, 1 insertion(+), 66 deletions(-) (limited to 'include') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 5adbc33d1ab3..472120b4fac5 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -246,7 +246,6 @@ struct tcp_sock { /* from STCP, retrans queue hinting */ struct sk_buff* lost_skb_hint; - struct sk_buff *scoreboard_skb_hint; struct sk_buff *retransmit_skb_hint; struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 5bba80fbd1d9..e1c3723f161f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1193,7 +1193,6 @@ static inline void tcp_mib_init(struct net *net) static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp) { tp->lost_skb_hint = NULL; - tp->scoreboard_skb_hint = NULL; } static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b358e8c98607..d7d369428ae4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1255,8 +1255,6 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = prev; - if (skb == tp->scoreboard_skb_hint) - tp->scoreboard_skb_hint = prev; if (skb == tp->lost_skb_hint) { tp->lost_skb_hint = prev; tp->lost_cnt_hint -= tcp_skb_pcount(prev); @@ -1964,20 +1962,6 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag) return true; } -static inline int tcp_skb_timedout(const struct sock *sk, - const struct sk_buff *skb) -{ - return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto; -} - -static inline int tcp_head_timedout(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - - return tp->packets_out && - tcp_skb_timedout(sk, tcp_write_queue_head(sk)); -} - /* Linux 
NewReno/SACK/FACK/ECN state machine. * -------------------------------------- * @@ -2084,12 +2068,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag) if (tcp_dupack_heuristics(tp) > tp->reordering) return true; - /* Trick#3 : when we use RFC2988 timer restart, fast - * retransmit can be triggered by timeout of queue head. - */ - if (tcp_is_fack(tp) && tcp_head_timedout(sk)) - return true; - /* Trick#4: It is still not OK... But will it be useful to delay * recovery more? */ @@ -2126,44 +2104,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag) return false; } -/* New heuristics: it is possible only after we switched to restart timer - * each time when something is ACKed. Hence, we can detect timed out packets - * during fast retransmit without falling to slow start. - * - * Usefulness of this as is very questionable, since we should know which of - * the segments is the next to timeout which is relatively expensive to find - * in general case unless we add some data structure just for that. The - * current approach certainly won't find the right one too often and when it - * finally does find _something_ it usually marks large part of the window - * right away (because a retransmission with a larger timestamp blocks the - * loop from advancing). -ij - */ -static void tcp_timeout_skbs(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb; - - if (!tcp_is_fack(tp) || !tcp_head_timedout(sk)) - return; - - skb = tp->scoreboard_skb_hint; - if (tp->scoreboard_skb_hint == NULL) - skb = tcp_write_queue_head(sk); - - tcp_for_write_queue_from(skb, sk) { - if (skb == tcp_send_head(sk)) - break; - if (!tcp_skb_timedout(sk, skb)) - break; - - tcp_skb_mark_lost(tp, skb); - } - - tp->scoreboard_skb_hint = skb; - - tcp_verify_left_out(tp); -} - /* Detect loss in event "A" above by marking head of queue up as lost. * For FACK or non-SACK(Reno) senders, the first "packets" number of segments * are considered lost. For RFC3517 SACK, a segment is considered lost if it @@ -2249,8 +2189,6 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) else if (fast_rexmit) tcp_mark_head_lost(sk, 1, 1); } - - tcp_timeout_skbs(sk); } /* CWND moderation, preventing bursts due to too big ACKs @@ -2842,7 +2780,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, fast_rexmit = 1; } - if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) + if (do_lost) tcp_update_scoreboard(sk, fast_rexmit); tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit); tcp_xmit_retransmit_queue(sk); @@ -3075,7 +3013,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); - tp->scoreboard_skb_hint = NULL; if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = NULL; if (skb == tp->lost_skb_hint) -- cgit v1.2.3 From 164954454a4b1000eb022415654001cceb9259a7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 17 May 2013 16:57:37 +0000 Subject: filter: do not output bpf image address for security reason Do not leak starting address of BPF JIT code for non root users, as it might help intruders to perform an attack. Signed-off-by: Eric Dumazet Cc: Ben Hutchings Cc: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/linux/filter.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/filter.h b/include/linux/filter.h index c050dcc322a4..56a6b7fbb3c6 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -58,10 +58,10 @@ extern void bpf_jit_free(struct sk_filter *fp); static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { - pr_err("flen=%u proglen=%u pass=%u image=%p\n", + pr_err("flen=%u proglen=%u pass=%u image=%pK\n", flen, proglen, pass, image); if (image) - print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 16, 1, image, proglen, false); } #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) -- cgit v1.2.3 From 99bbc70741903c063b3ccad90a3e06fc55df9245 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 20 May 2013 04:02:32 +0000 Subject: rps: selective flow shedding during softnet overflow A cpu executing the network receive path sheds packets when its input queue grows to netdev_max_backlog. A single high rate flow (such as a spoofed source DoS) can exceed a single cpu processing rate and will degrade throughput of other flows hashed onto the same cpu. This patch adds a more fine grained hashtable. If the netdev backlog is above a threshold, IRQ cpus track the ratio of total traffic of each flow (using 4096 buckets, configurable). The ratio is measured by counting the number of packets per flow over the last 256 packets from the source cpu. Any flow that occupies a large fraction of this (set at 50%) will see packet drop while above the threshold. Tested: Setup is a muli-threaded UDP echo server with network rx IRQ on cpu0, kernel receive (RPS) on cpu0 and application threads on cpus 2--7 each handling 20k req/s. Throughput halves when hit with a 400 kpps antagonist storm. With this patch applied, antagonist overload is dropped and the server processes its complete load. The patch is effective when kernel receive processing is the bottleneck. The above RPS scenario is a extreme, but the same is reached with RFS and sufficient kernel processing (iptables, packet socket tap, ..). Signed-off-by: Willem de Bruijn Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 17 ++++++++ net/Kconfig | 12 ++++++ net/core/dev.c | 48 ++++++++++++++++++++- net/core/net-procfs.c | 16 ++++++- net/core/sysctl_net_core.c | 104 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 194 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a94a5a0ab122..7dd535d4b41e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1778,6 +1778,19 @@ static inline int unregister_gifconf(unsigned int family) return register_gifconf(family, NULL); } +#ifdef CONFIG_NET_FLOW_LIMIT +#define FLOW_LIMIT_HISTORY (1 << 8) /* must be ^2 */ +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[FLOW_LIMIT_HISTORY]; + u8 buckets[]; +}; + +extern int netdev_flow_limit_table_len; +#endif /* CONFIG_NET_FLOW_LIMIT */ + /* * Incoming packets are placed on per-cpu queues */ @@ -1807,6 +1820,10 @@ struct softnet_data { unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; + +#ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit *flow_limit; +#endif }; static inline void input_queue_head_incr(struct softnet_data *sd) diff --git a/net/Kconfig b/net/Kconfig index 2ddc9046868e..08de901415ee 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -259,6 +259,18 @@ config BPF_JIT packet sniffing (libpcap/tcpdump). Note : Admin should enable this feature changing /proc/sys/net/core/bpf_jit_enable +config NET_FLOW_LIMIT + boolean + depends on RPS + default y + ---help--- + The network stack has to drop packets when a receive processing CPU's + backlog reaches netdev_max_backlog. If a few out of many active flows + generate the vast majority of load, drop their traffic earlier to + maintain capacity for the other flows. This feature provides servers + with many clients some protection against DoS by a single (spoofed) + flow that greatly exceeds average workload. + menu "Network testing" config NET_PKTGEN diff --git a/net/core/dev.c b/net/core/dev.c index 18e9730cc4be..7229bc30e509 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3064,6 +3064,46 @@ static int rps_ipi_queued(struct softnet_data *sd) return 0; } +#ifdef CONFIG_NET_FLOW_LIMIT +int netdev_flow_limit_table_len __read_mostly = (1 << 12); +#endif + +static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) +{ +#ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit *fl; + struct softnet_data *sd; + unsigned int old_flow, new_flow; + + if (qlen < (netdev_max_backlog >> 1)) + return false; + + sd = &__get_cpu_var(softnet_data); + + rcu_read_lock(); + fl = rcu_dereference(sd->flow_limit); + if (fl) { + new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1); + old_flow = fl->history[fl->history_head]; + fl->history[fl->history_head] = new_flow; + + fl->history_head++; + fl->history_head &= FLOW_LIMIT_HISTORY - 1; + + if (likely(fl->buckets[old_flow])) + fl->buckets[old_flow]--; + + if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) { + fl->count++; + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); +#endif + return false; +} + /* * enqueue_to_backlog is called to queue an skb to a per CPU backlog * queue (may be a remote CPU queue). 
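The accounting in skb_flow_limit() above can be read as a standalone heuristic; the following userspace sketch (illustrative, not kernel code) keeps the bucket ids of the last 256 packets in a ring plus a per-bucket counter, and sheds any flow that owns more than half of that recent history.

#include <stdbool.h>
#include <stdint.h>

#define HISTORY	256	/* FLOW_LIMIT_HISTORY in the patch, power of 2 */
#define BUCKETS	4096	/* netdev_flow_limit_table_len default */

static uint16_t history[HISTORY];
static uint16_t buckets[BUCKETS];
static unsigned int head;

static bool example_flow_over_limit(uint32_t rxhash)
{
	unsigned int new_flow = rxhash & (BUCKETS - 1);
	unsigned int old_flow = history[head];

	history[head] = new_flow;		/* overwrite the oldest sample */
	head = (head + 1) & (HISTORY - 1);

	if (buckets[old_flow])			/* forget the oldest sample */
		buckets[old_flow]--;

	return ++buckets[new_flow] > (HISTORY >> 1);	/* >50% of recent history */
}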
@@ -3073,13 +3113,15 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, { struct softnet_data *sd; unsigned long flags; + unsigned int qlen; sd = &per_cpu(softnet_data, cpu); local_irq_save(flags); rps_lock(sd); - if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { + qlen = skb_queue_len(&sd->input_pkt_queue); + if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { if (skb_queue_len(&sd->input_pkt_queue)) { enqueue: __skb_queue_tail(&sd->input_pkt_queue, skb); @@ -6269,6 +6311,10 @@ static int __init net_dev_init(void) sd->backlog.weight = weight_p; sd->backlog.gro_list = NULL; sd->backlog.gro_count = 0; + +#ifdef CONFIG_NET_FLOW_LIMIT + sd->flow_limit = NULL; +#endif } dev_boot_phase = 0; diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index 569d355fec3e..2bf83299600a 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -146,11 +146,23 @@ static void softnet_seq_stop(struct seq_file *seq, void *v) static int softnet_seq_show(struct seq_file *seq, void *v) { struct softnet_data *sd = v; + unsigned int flow_limit_count = 0; - seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", +#ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit *fl; + + rcu_read_lock(); + fl = rcu_dereference(sd->flow_limit); + if (fl) + flow_limit_count = fl->count; + rcu_read_unlock(); +#endif + + seq_printf(seq, + "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", sd->processed, sd->dropped, sd->time_squeeze, 0, 0, 0, 0, 0, /* was fastroute */ - sd->cpu_collision, sd->received_rps); + sd->cpu_collision, sd->received_rps, flow_limit_count); return 0; } diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index cfdb46ab3a7f..741db5fc7806 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -87,6 +87,96 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write, } #endif /* CONFIG_RPS */ +#ifdef CONFIG_NET_FLOW_LIMIT +static DEFINE_MUTEX(flow_limit_update_mutex); + +static int flow_limit_cpu_sysctl(ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct sd_flow_limit *cur; + struct softnet_data *sd; + cpumask_var_t mask; + int i, len, ret = 0; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return -ENOMEM; + + if (write) { + ret = cpumask_parse_user(buffer, *lenp, mask); + if (ret) + goto done; + + mutex_lock(&flow_limit_update_mutex); + len = sizeof(*cur) + netdev_flow_limit_table_len; + for_each_possible_cpu(i) { + sd = &per_cpu(softnet_data, i); + cur = rcu_dereference_protected(sd->flow_limit, + lockdep_is_held(&flow_limit_update_mutex)); + if (cur && !cpumask_test_cpu(i, mask)) { + RCU_INIT_POINTER(sd->flow_limit, NULL); + synchronize_rcu(); + kfree(cur); + } else if (!cur && cpumask_test_cpu(i, mask)) { + cur = kzalloc(len, GFP_KERNEL); + if (!cur) { + /* not unwinding previous changes */ + ret = -ENOMEM; + goto write_unlock; + } + cur->num_buckets = netdev_flow_limit_table_len; + rcu_assign_pointer(sd->flow_limit, cur); + } + } +write_unlock: + mutex_unlock(&flow_limit_update_mutex); + } else { + if (*ppos || !*lenp) { + *lenp = 0; + goto done; + } + + cpumask_clear(mask); + rcu_read_lock(); + for_each_possible_cpu(i) { + sd = &per_cpu(softnet_data, i); + if (rcu_dereference(sd->flow_limit)) + cpumask_set_cpu(i, mask); + } + rcu_read_unlock(); + + len = cpumask_scnprintf(buffer, *lenp, mask); + *lenp = len + 1; + *ppos += len + 1; + } + +done: + free_cpumask_var(mask); + return ret; +} + +static int flow_limit_table_len_sysctl(ctl_table *table, int 
write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + unsigned int old, *ptr; + int ret; + + mutex_lock(&flow_limit_update_mutex); + + ptr = table->data; + old = *ptr; + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (!ret && write && !is_power_of_2(*ptr)) { + *ptr = old; + ret = -EINVAL; + } + + mutex_unlock(&flow_limit_update_mutex); + return ret; +} +#endif /* CONFIG_NET_FLOW_LIMIT */ + static struct ctl_table net_core_table[] = { #ifdef CONFIG_NET { @@ -180,6 +270,20 @@ static struct ctl_table net_core_table[] = { .proc_handler = rps_sock_flow_sysctl }, #endif +#ifdef CONFIG_NET_FLOW_LIMIT + { + .procname = "flow_limit_cpu_bitmap", + .mode = 0644, + .proc_handler = flow_limit_cpu_sysctl + }, + { + .procname = "flow_limit_table_len", + .data = &netdev_flow_limit_table_len, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = flow_limit_table_len_sysctl + }, +#endif /* CONFIG_NET_FLOW_LIMIT */ #endif /* CONFIG_NET */ { .procname = "netdev_budget", -- cgit v1.2.3 From 168fc21a971e4bc821a7838ccc39b7bdaf316c11 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 20 May 2013 04:53:38 +0000 Subject: net: ipv6: remove 'next' member from inet6_dev The next pointer within the inet6_dev structure seems not to be used anywhere. So just remove it. Tested with allmodconfig on x86_64. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/net/if_inet6.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 0727d0e4af3b..e07feb456d19 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -193,7 +193,6 @@ struct inet6_dev { struct in6_addr token; struct neigh_parms *nd_parms; - struct inet6_dev *next; struct ipv6_devconf cnf; struct ipv6_devstat stats; unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ -- cgit v1.2.3 From 71cea17ed39fdf1c0634f530ddc6a2c2fc601c2b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 20 May 2013 06:52:26 +0000 Subject: tcp: md5: remove spinlock usage in fast path TCP md5 code uses per cpu variables but protects access to them with a shared spinlock, which is a contention point. [ tcp_md5sig_pool_lock is locked twice per incoming packet ] Makes things much simpler, by allocating crypto structures once, first time a socket needs md5 keys, and not deallocating them as they are really small. Next step would be to allow crypto allocations being done in a NUMA aware way. Signed-off-by: Eric Dumazet Cc: Herbert Xu Signed-off-by: David S. 
Miller --- include/net/tcp.h | 8 ++-- net/ipv4/tcp.c | 98 +++++++++++------------------------------------- net/ipv4/tcp_ipv4.c | 10 +---- net/ipv4/tcp_minisocks.c | 6 +-- 4 files changed, 29 insertions(+), 93 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index e1c3723f161f..bf1cc3dced5e 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1283,11 +1283,13 @@ static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, #define tcp_twsk_md5_key(twsk) NULL #endif -extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *); -extern void tcp_free_md5sig_pool(void); +extern bool tcp_alloc_md5sig_pool(void); extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); -extern void tcp_put_md5sig_pool(void); +static inline void tcp_put_md5sig_pool(void) +{ + local_bh_enable(); +} extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *); extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index dcb116dde216..53d9c120fbb8 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3095,9 +3095,8 @@ int tcp_gro_complete(struct sk_buff *skb) EXPORT_SYMBOL(tcp_gro_complete); #ifdef CONFIG_TCP_MD5SIG -static unsigned long tcp_md5sig_users; -static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool; -static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); +static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; +static DEFINE_MUTEX(tcp_md5sig_mutex); static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) { @@ -3112,30 +3111,14 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) free_percpu(pool); } -void tcp_free_md5sig_pool(void) -{ - struct tcp_md5sig_pool __percpu *pool = NULL; - - spin_lock_bh(&tcp_md5sig_pool_lock); - if (--tcp_md5sig_users == 0) { - pool = tcp_md5sig_pool; - tcp_md5sig_pool = NULL; - } - spin_unlock_bh(&tcp_md5sig_pool_lock); - if (pool) - __tcp_free_md5sig_pool(pool); -} -EXPORT_SYMBOL(tcp_free_md5sig_pool); - -static struct tcp_md5sig_pool __percpu * -__tcp_alloc_md5sig_pool(struct sock *sk) +static void __tcp_alloc_md5sig_pool(void) { int cpu; struct tcp_md5sig_pool __percpu *pool; pool = alloc_percpu(struct tcp_md5sig_pool); if (!pool) - return NULL; + return; for_each_possible_cpu(cpu) { struct crypto_hash *hash; @@ -3146,53 +3129,27 @@ __tcp_alloc_md5sig_pool(struct sock *sk) per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; } - return pool; + /* before setting tcp_md5sig_pool, we must commit all writes + * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool() + */ + smp_wmb(); + tcp_md5sig_pool = pool; + return; out_free: __tcp_free_md5sig_pool(pool); - return NULL; } -struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk) +bool tcp_alloc_md5sig_pool(void) { - struct tcp_md5sig_pool __percpu *pool; - bool alloc = false; - -retry: - spin_lock_bh(&tcp_md5sig_pool_lock); - pool = tcp_md5sig_pool; - if (tcp_md5sig_users++ == 0) { - alloc = true; - spin_unlock_bh(&tcp_md5sig_pool_lock); - } else if (!pool) { - tcp_md5sig_users--; - spin_unlock_bh(&tcp_md5sig_pool_lock); - cpu_relax(); - goto retry; - } else - spin_unlock_bh(&tcp_md5sig_pool_lock); - - if (alloc) { - /* we cannot hold spinlock here because this may sleep. 
*/ - struct tcp_md5sig_pool __percpu *p; - - p = __tcp_alloc_md5sig_pool(sk); - spin_lock_bh(&tcp_md5sig_pool_lock); - if (!p) { - tcp_md5sig_users--; - spin_unlock_bh(&tcp_md5sig_pool_lock); - return NULL; - } - pool = tcp_md5sig_pool; - if (pool) { - /* oops, it has already been assigned. */ - spin_unlock_bh(&tcp_md5sig_pool_lock); - __tcp_free_md5sig_pool(p); - } else { - tcp_md5sig_pool = pool = p; - spin_unlock_bh(&tcp_md5sig_pool_lock); - } + if (unlikely(!tcp_md5sig_pool)) { + mutex_lock(&tcp_md5sig_mutex); + + if (!tcp_md5sig_pool) + __tcp_alloc_md5sig_pool(); + + mutex_unlock(&tcp_md5sig_mutex); } - return pool; + return tcp_md5sig_pool != NULL; } EXPORT_SYMBOL(tcp_alloc_md5sig_pool); @@ -3209,28 +3166,15 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) struct tcp_md5sig_pool __percpu *p; local_bh_disable(); - - spin_lock(&tcp_md5sig_pool_lock); - p = tcp_md5sig_pool; - if (p) - tcp_md5sig_users++; - spin_unlock(&tcp_md5sig_pool_lock); - + p = ACCESS_ONCE(tcp_md5sig_pool); if (p) - return this_cpu_ptr(p); + return __this_cpu_ptr(p); local_bh_enable(); return NULL; } EXPORT_SYMBOL(tcp_get_md5sig_pool); -void tcp_put_md5sig_pool(void) -{ - local_bh_enable(); - tcp_free_md5sig_pool(); -} -EXPORT_SYMBOL(tcp_put_md5sig_pool); - int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, const struct tcphdr *th) { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 719652305a29..d20ede0c9593 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1026,7 +1026,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, key = sock_kmalloc(sk, sizeof(*key), gfp); if (!key) return -ENOMEM; - if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) { + if (!tcp_alloc_md5sig_pool()) { sock_kfree_s(sk, key, sizeof(*key)); return -ENOMEM; } @@ -1044,9 +1044,7 @@ EXPORT_SYMBOL(tcp_md5_do_add); int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) { - struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; - struct tcp_md5sig_info *md5sig; key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); if (!key) @@ -1054,10 +1052,6 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) hlist_del_rcu(&key->node); atomic_sub(sizeof(*key), &sk->sk_omem_alloc); kfree_rcu(key, rcu); - md5sig = rcu_dereference_protected(tp->md5sig_info, - sock_owned_by_user(sk)); - if (hlist_empty(&md5sig->head)) - tcp_free_md5sig_pool(); return 0; } EXPORT_SYMBOL(tcp_md5_do_del); @@ -1071,8 +1065,6 @@ static void tcp_clear_md5_list(struct sock *sk) md5sig = rcu_dereference_protected(tp->md5sig_info, 1); - if (!hlist_empty(&md5sig->head)) - tcp_free_md5sig_pool(); hlist_for_each_entry_safe(key, n, &md5sig->head, node) { hlist_del_rcu(&key->node); atomic_sub(sizeof(*key), &sk->sk_omem_alloc); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 0f0178827259..ab1c08658528 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -317,7 +317,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) key = tp->af_specific->md5_lookup(sk, sk); if (key != NULL) { tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC); - if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL) + if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool()) BUG(); } } while (0); @@ -358,10 +358,8 @@ void tcp_twsk_destructor(struct sock *sk) #ifdef CONFIG_TCP_MD5SIG struct tcp_timewait_sock *twsk = tcp_twsk(sk); - if (twsk->tw_md5_key) { - tcp_free_md5sig_pool(); + if (twsk->tw_md5_key) kfree_rcu(twsk->tw_md5_key, rcu); - 
} #endif } EXPORT_SYMBOL_GPL(tcp_twsk_destructor); -- cgit v1.2.3 From 2c7b49212a86f13697281a4dace2cb96aec71d6b Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 19 May 2013 22:53:42 +0000 Subject: phy: fix the use of PHY_IGNORE_INTERRUPT When a PHY device is registered with the special IRQ value PHY_IGNORE_INTERRUPT (-2) it will not properly be handled by the PHY library: - it continues to poll its register, while we do not want this because such PHY link events or register changes are serviced by an Ethernet MAC - it will still try to configure PHY interrupts at the PHY level, such interrupts do not exist at the PHY but at the MAC level - the state machine only handles PHY_POLL, but should also handle PHY_IGNORE_INTERRUPT similarly This patch updates the PHY state machine and initialization paths to account for the specific PHY_IGNORE_INTERRUPT. Based on an earlier patch by Thomas Petazzoni, and reworked to add the missing bits. Add a helper phy_interrupt_is_valid() which specifically tests for a PHY interrupt not to be PHY_POLL or PHY_IGNORE_INTERRUPT and use it throughout the code. Signed-off-by: Thomas Petazzoni Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 9 +++++---- drivers/net/phy/phy_device.c | 7 +++++-- include/linux/phy.h | 12 ++++++++++++ 3 files changed, 22 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c14f14741b3f..3bcf0994d3ba 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -682,7 +682,7 @@ void phy_stop(struct phy_device *phydev) if (PHY_HALTED == phydev->state) goto out_unlock; - if (phydev->irq != PHY_POLL) { + if (phy_interrupt_is_valid(phydev)) { /* Disable PHY Interrupts */ phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); @@ -828,8 +828,9 @@ void phy_state_machine(struct work_struct *work) break; case PHY_RUNNING: /* Only register a CHANGE if we are - * polling */ - if (PHY_POLL == phydev->irq) + * polling or ignoring interrupts + */ + if (!phy_interrupt_is_valid(phydev)) phydev->state = PHY_CHANGELINK; break; case PHY_CHANGELINK: @@ -848,7 +849,7 @@ void phy_state_machine(struct work_struct *work) phydev->adjust_link(phydev->attached_dev); - if (PHY_POLL != phydev->irq) + if (phy_interrupt_is_valid(phydev)) err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); break; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 3657b4a29124..8e29d22ba113 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1009,8 +1009,11 @@ static int phy_probe(struct device *dev) phydrv = to_phy_driver(drv); phydev->drv = phydrv; - /* Disable the interrupt if the PHY doesn't support it */ - if (!(phydrv->flags & PHY_HAS_INTERRUPT)) + /* Disable the interrupt if the PHY doesn't support it + * but the interrupt is still a valid one + */ + if (!(phydrv->flags & PHY_HAS_INTERRUPT) && + phy_interrupt_is_valid(phydev)) phydev->irq = PHY_POLL; mutex_lock(&phydev->lock); diff --git a/include/linux/phy.h b/include/linux/phy.h index 9e11039dd7a3..8e4bc8ab692d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -508,6 +508,18 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) return mdiobus_write(phydev->bus, phydev->addr, regnum, val); } +/** + * phy_interrupt_is_valid - Convenience function for testing a given PHY irq + * @phydev: the phy_device struct + * + * NOTE: must be kept in sync with addition/removal of PHY_POLL and + * PHY_IGNORE_INTERRUPT + */ 
+static inline bool phy_interrupt_is_valid(struct phy_device *phydev) +{ + return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT; +} + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); -- cgit v1.2.3 From 5ea94e7686a3aa04cc0d01a2d8bd3d0292b3f592 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 19 May 2013 22:53:43 +0000 Subject: phy: add phy_mac_interrupt() to use with PHY_IGNORE_INTERRUPT There is currently no way for an Ethernet MAC driver servicing PHY link interrupts to notify this to the PHY state machine without defining its own state machine. Since most drivers are not so special, introduce a helper: phy_mac_interrupt() which can be called from a link up/down interrupt routine to update the PHY state machine. To avoid code duplication some refactoring has been done to expose the workqueue and its corresponding callback internally. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 14 +++++++++----- drivers/net/phy/phy_device.c | 1 + include/linux/phy.h | 2 ++ 3 files changed, 12 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 3bcf0994d3ba..2d28a0ef4572 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -419,8 +419,6 @@ out_unlock: EXPORT_SYMBOL(phy_start_aneg); -static void phy_change(struct work_struct *work); - /** * phy_start_machine - start PHY state machine tracking * @phydev: the phy_device struct @@ -565,8 +563,6 @@ int phy_start_interrupts(struct phy_device *phydev) { int err = 0; - INIT_WORK(&phydev->phy_queue, phy_change); - atomic_set(&phydev->irq_disable, 0); if (request_irq(phydev->irq, phy_interrupt, IRQF_SHARED, @@ -623,7 +619,7 @@ EXPORT_SYMBOL(phy_stop_interrupts); * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes * @work: work_struct that describes the work to be done */ -static void phy_change(struct work_struct *work) +void phy_change(struct work_struct *work) { int err; struct phy_device *phydev = @@ -922,6 +918,14 @@ void phy_state_machine(struct work_struct *work) schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ); } +void phy_mac_interrupt(struct phy_device *phydev, int new_link) +{ + cancel_work_sync(&phydev->phy_queue); + phydev->link = new_link; + schedule_work(&phydev->phy_queue); +} +EXPORT_SYMBOL(phy_mac_interrupt); + static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, int addr) { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8e29d22ba113..b55aa33a5b8b 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -189,6 +189,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, mutex_init(&dev->lock); INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); + INIT_WORK(&dev->phy_queue, phy_change); /* Request the appropriate module unconditionally; don't bother trying to do so only if it isn't already loaded, diff --git a/include/linux/phy.h b/include/linux/phy.h index 8e4bc8ab692d..fdfa11542974 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -557,6 +557,8 @@ void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver); int phy_drivers_register(struct phy_driver *new_driver, int n); void phy_state_machine(struct work_struct *work); +void 
phy_change(struct work_struct *work); +void phy_mac_interrupt(struct phy_device *phydev, int new_link); void phy_start_machine(struct phy_device *phydev, void (*handler)(struct net_device *)); void phy_stop_machine(struct phy_device *phydev); -- cgit v1.2.3 From 78e578c5b43c4f274305075c68d055c8d6141fd5 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Mon, 13 May 2013 22:07:53 +0200 Subject: bcma: support SPROM rev 10 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is pretty much the same as rev 9, there are just 2 extra fields we know about, but are not used/stored yet anyway. Signed-off-by: Rafał Miłecki Signed-off-by: John W. Linville --- drivers/bcma/sprom.c | 16 +++++++++------- include/linux/ssb/ssb_regs.h | 1 + 2 files changed, 10 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index c96f71c183cd..de15b4f4b237 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -154,7 +154,8 @@ static int bcma_sprom_check_crc(const u16 *sprom, size_t words) return 0; } -static int bcma_sprom_valid(const u16 *sprom, size_t words) +static int bcma_sprom_valid(struct bcma_bus *bus, const u16 *sprom, + size_t words) { u16 revision; int err; @@ -164,11 +165,14 @@ static int bcma_sprom_valid(const u16 *sprom, size_t words) return err; revision = sprom[words - 1] & SSB_SPROM_REVISION_REV; - if (revision != 8 && revision != 9) { + if (revision != 8 && revision != 9 && revision != 10) { pr_err("Unsupported SPROM revision: %d\n", revision); return -ENOENT; } + bus->sprom.revision = revision; + bcma_debug(bus, "Found SPROM revision %d\n", revision); + return 0; } @@ -208,9 +212,6 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom) BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) != ARRAY_SIZE(bus->sprom.core_pwr_info)); - bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & - SSB_SPROM_REVISION_REV; - for (i = 0; i < 3; i++) { v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i]; *(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v); @@ -549,7 +550,8 @@ int bcma_sprom_get(struct bcma_bus *bus) { u16 offset = BCMA_CC_SPROM; u16 *sprom; - size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4, }; + size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4, + SSB_SPROMSIZE_WORDS_R10, }; int i, err = 0; if (!bus->drv_cc.core) @@ -592,7 +594,7 @@ int bcma_sprom_get(struct bcma_bus *bus) return -ENOMEM; bcma_sprom_read(bus, offset, sprom, words); - err = bcma_sprom_valid(sprom, words); + err = bcma_sprom_valid(bus, sprom, words); if (!err) break; diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index 3a7256955b10..f9f931c89e3e 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -172,6 +172,7 @@ #define SSB_SPROMSIZE_WORDS_R4 220 #define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) #define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) +#define SSB_SPROMSIZE_WORDS_R10 230 #define SSB_SPROM_BASE1 0x1000 #define SSB_SPROM_BASE31 0x0800 #define SSB_SPROM_REVISION 0x007E -- cgit v1.2.3 From 6d11cfdba52af08b889fd6d3ee4212930493eb38 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 22 May 2013 22:42:36 +0000 Subject: netfilter: don't panic on error while walking through the init path Don't panic if we hit an error while adding the nf_log or pernet netfilter support, just bail out. 
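The fix amounts to propagating errors up instead of calling panic(); as a condensed view of the new netfilter_init() in the hunk below (goto labels and the nf_hooks initialisation loop omitted for brevity):

	int __init netfilter_init(void)
	{
		int ret;

		ret = register_pernet_subsys(&netfilter_net_ops);
		if (ret < 0)
			return ret;

		ret = netfilter_log_init();
		if (ret < 0) {
			/* unwind the pernet registration before bailing out */
			unregister_pernet_subsys(&netfilter_net_ops);
			return ret;
		}
		return 0;
	}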
Signed-off-by: Pablo Neira Ayuso Acked-by: Gao feng --- include/linux/netfilter.h | 2 +- net/netfilter/core.c | 21 +++++++++++++++------ net/netfilter/nf_log.c | 5 +---- net/socket.c | 4 +++- 4 files changed, 20 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 0060fde3160e..de70f7b45b68 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -35,7 +35,7 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1, result->all[3] = a1->all[3] & mask->all[3]; } -extern void netfilter_init(void); +extern int netfilter_init(void); /* Largest hook number + 1 */ #define NF_MAX_HOOKS 8 diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 07c865a31a3d..300539db7bb1 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -302,17 +302,26 @@ static struct pernet_operations netfilter_net_ops = { .exit = netfilter_net_exit, }; -void __init netfilter_init(void) +int __init netfilter_init(void) { - int i, h; + int i, h, ret; + for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) { for (h = 0; h < NF_MAX_HOOKS; h++) INIT_LIST_HEAD(&nf_hooks[i][h]); } - if (register_pernet_subsys(&netfilter_net_ops) < 0) - panic("cannot create netfilter proc entry"); + ret = register_pernet_subsys(&netfilter_net_ops); + if (ret < 0) + goto err; + + ret = netfilter_log_init(); + if (ret < 0) + goto err_pernet; - if (netfilter_log_init() < 0) - panic("cannot initialize nf_log"); + return 0; +err_pernet: + unregister_pernet_subsys(&netfilter_net_ops); +err: + return ret; } diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 388656d5a9ec..bd5474adcabc 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -368,10 +368,7 @@ static int __net_init nf_log_net_init(struct net *net) return 0; out_sysctl: - /* For init_net: errors will trigger panic, don't unroll on error. */ - if (!net_eq(net, &init_net)) - remove_proc_entry("nf_log", net->nf.proc_netfilter); - + remove_proc_entry("nf_log", net->nf.proc_netfilter); return ret; } diff --git a/net/socket.c b/net/socket.c index 6b94633ca61d..734194d36242 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2612,7 +2612,9 @@ static int __init sock_init(void) */ #ifdef CONFIG_NETFILTER - netfilter_init(); + err = netfilter_init(); + if (err) + goto out; #endif #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING -- cgit v1.2.3 From de94c4591bd606729af1b913d6e98c6c449e42df Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 22 May 2013 22:42:37 +0000 Subject: netfilter: {ipt,ebt}_ULOG: rise warning on deprecation This target has been superseded by NFLOG. Spot a warning so we prepare removal in a couple of years. 
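The warning itself is a warn-once-per-namespace check keyed off the new netns_xt flags; lifted, slightly condensed, from the ipt_ULOG hunk below:

	if (!par->net->xt.ulog_warn_deprecated) {
		pr_info("ULOG is deprecated and it will be removed soon, use NFLOG instead\n");
		par->net->xt.ulog_warn_deprecated = true;
	}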
Signed-off-by: Pablo Neira Ayuso Acked-by: Gao feng --- include/net/netns/x_tables.h | 6 ++++++ net/bridge/netfilter/ebt_ulog.c | 6 ++++++ net/ipv4/netfilter/Kconfig | 2 +- net/ipv4/netfilter/ipt_ULOG.c | 6 ++++++ 4 files changed, 19 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h index c24060ee411e..02fe40f8c8fd 100644 --- a/include/net/netns/x_tables.h +++ b/include/net/netns/x_tables.h @@ -15,5 +15,11 @@ struct netns_xt { struct ebt_table *frame_filter; struct ebt_table *frame_nat; #endif +#if IS_ENABLED(CONFIG_IP_NF_TARGET_ULOG) + bool ulog_warn_deprecated; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_EBT_ULOG) + bool ebt_ulog_warn_deprecated; +#endif }; #endif diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index fc1905c51417..2ec6c19ff903 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -267,6 +267,12 @@ static int ebt_ulog_tg_check(const struct xt_tgchk_param *par) { struct ebt_ulog_info *uloginfo = par->targinfo; + if (!par->net->xt.ebt_ulog_warn_deprecated) { + pr_info("ebt_ulog is deprecated and it will be removed soon, " + "use ebt_nflog instead\n"); + par->net->xt.ebt_ulog_warn_deprecated = true; + } + if (uloginfo->nlgroup > 31) return -EINVAL; diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index e7916c193932..4e9028017428 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -111,7 +111,7 @@ config IP_NF_TARGET_REJECT To compile it as a module, choose M here. If unsure, say N. config IP_NF_TARGET_ULOG - tristate "ULOG target support" + tristate "ULOG target support (obsolete)" default m if NETFILTER_ADVANCED=n ---help--- diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index f8a222cb6448..c1953d07e2f4 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -325,6 +325,12 @@ static int ulog_tg_check(const struct xt_tgchk_param *par) { const struct ipt_ulog_info *loginfo = par->targinfo; + if (!par->net->xt.ulog_warn_deprecated) { + pr_info("ULOG is deprecated and it will be removed soon, " + "use NFLOG instead\n"); + par->net->xt.ulog_warn_deprecated = true; + } + if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') { pr_debug("prefix not null-terminated\n"); return -EINVAL; -- cgit v1.2.3 From f6f3c437d09e2f62533034e67bfb4385191e992c Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 22 May 2013 14:50:31 +0900 Subject: sched: add cond_resched_rcu() helper This is intended for use in loops which read data protected by RCU and may have a large number of iterations. Such an example is dumping the list of connections known to IPVS: ip_vs_conn_array() and ip_vs_conn_seq_next(). The benefits are for CONFIG_PREEMPT_RCU=y where we save CPU cycles by moving rcu_read_lock and rcu_read_unlock out of large loops but still allowing the current task to be preempted after every loop iteration for the CONFIG_PREEMPT_RCU=n case. The call to cond_resched() is not needed when CONFIG_PREEMPT_RCU=y. Thanks to Paul E. McKenney for explaining this and for the final version that checks the context with CONFIG_DEBUG_ATOMIC_SLEEP=y for all possible configurations. The function can be empty in the CONFIG_PREEMPT_RCU case, rcu_read_lock and rcu_read_unlock are not needed in this case because the task can be preempted on indication from scheduler. Thanks to Peter Zijlstra for catching this and for his help in trying a solution that changes __might_sleep. 
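A hypothetical usage sketch of the helper (the hash table, its size and dump_one() are made up; the shape mirrors the IPVS dump loops mentioned above). Note that no iterator pointer is held across the call, since cond_resched_rcu() may drop the RCU read lock:

	rcu_read_lock();
	for (idx = 0; idx < table_size; idx++) {
		hlist_for_each_entry_rcu(p, &table[idx], node)
			dump_one(p);
		/* may briefly leave the RCU read-side section and reschedule */
		cond_resched_rcu();
	}
	rcu_read_unlock();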
Initial cond_resched_rcu_lock() function suggested by Eric Dumazet. Tested-by: Julian Anastasov Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman Acked-by: Peter Zijlstra Signed-off-by: Pablo Neira Ayuso --- include/linux/sched.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 178a8d909f14..4ff8da189253 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2444,6 +2444,15 @@ extern int __cond_resched_softirq(void); __cond_resched_softirq(); \ }) +static inline void cond_resched_rcu(void) +{ +#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) + rcu_read_unlock(); + cond_resched(); + rcu_read_lock(); +#endif +} + /* * Does a critical section need to be broken due to another * task waiting?: (technically does not depend on CONFIG_PREEMPT, -- cgit v1.2.3 From a5560a6c17a4af4e8f45d5c1215ba7d5bb0ff109 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Wed, 22 May 2013 06:34:47 +0000 Subject: xen: netif.h: document feature-split-event-channels This patch synchronises documentation for feature-split-event-channels from Xen canonical header file. Signed-off-by: Wei Liu Signed-off-by: David S. Miller --- include/xen/interface/io/netif.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include') diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h index 3ef3fe05ee99..eb262e3324d2 100644 --- a/include/xen/interface/io/netif.h +++ b/include/xen/interface/io/netif.h @@ -38,6 +38,18 @@ * that it cannot safely queue packets (as it may not be kicked to send them). */ + /* + * "feature-split-event-channels" is introduced to separate guest TX + * and RX notificaion. Backend either doesn't support this feature or + * advertise it via xenstore as 0 (disabled) or 1 (enabled). + * + * To make use of this feature, frontend should allocate two event + * channels for TX and RX, advertise them to backend as + * "event-channel-tx" and "event-channel-rx" respectively. If frontend + * doesn't want to use this feature, it just writes "event-channel" + * node as before. + */ + /* * This is the 'wire' format for packets: * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags) -- cgit v1.2.3 From 786677d100600b7f6089bae0d3967c1b901a6141 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Fri, 24 May 2013 12:05:45 +0200 Subject: mac80211: add STBC flag for radiotap Some chips can tell us if received frame was encoded with STBC or not. To make this information available in user space we can use updated radiotap specification: http://www.radiotap.org/defined-fields/MCS This patch will set number of STBC encoded spatial streams (Nss). The HAVE_STBC flag should be provided by driver. 
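As a rough driver-side sketch (the surrounding rx path and variable names are assumed; the flag and field names come from the hunks below), a driver whose hardware reports STBC would advertise IEEE80211_RADIOTAP_MCS_HAVE_STBC once at registration and tag each decoded frame with the number of STBC streams, e.g. Nss = 1:

	/* wiphy registration: radiotap MCS field will carry STBC info */
	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_STBC;

	/* per received HT frame: encode Nss = 1 in the two STBC flag bits */
	rx_status->flag |= RX_FLAG_HT;
	rx_status->flag |= 1 << RX_FLAG_STBC_SHIFT;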
Signed-off-by: Oleksij Rempel Signed-off-by: Johannes Berg --- include/net/ieee80211_radiotap.h | 7 +++++++ include/net/mac80211.h | 4 ++++ net/mac80211/rx.c | 4 ++++ 3 files changed, 15 insertions(+) (limited to 'include') diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index c3999632e616..c6d07cb074bc 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -269,6 +269,7 @@ enum ieee80211_radiotap_type { #define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04 #define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08 #define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10 +#define IEEE80211_RADIOTAP_MCS_HAVE_STBC 0x20 #define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03 #define IEEE80211_RADIOTAP_MCS_BW_20 0 @@ -278,6 +279,12 @@ enum ieee80211_radiotap_type { #define IEEE80211_RADIOTAP_MCS_SGI 0x04 #define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 +#define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60 +#define IEEE80211_RADIOTAP_MCS_STBC_1 1 +#define IEEE80211_RADIOTAP_MCS_STBC_2 2 +#define IEEE80211_RADIOTAP_MCS_STBC_3 3 + +#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5 /* For IEEE80211_RADIOTAP_AMPDU_STATUS */ #define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001 diff --git a/include/net/mac80211.h b/include/net/mac80211.h index c36535e7b291..c9e6fb7417f6 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -805,6 +805,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * on this subframe * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC * is stored in the @ampdu_delimiter_crc field) + * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3 */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -832,8 +833,11 @@ enum mac80211_rx_flags { RX_FLAG_80MHZ = BIT(23), RX_FLAG_80P80MHZ = BIT(24), RX_FLAG_160MHZ = BIT(25), + RX_FLAG_STBC_MASK = BIT(26) | BIT(27), }; +#define RX_FLAG_STBC_SHIFT 26 + /** * struct ieee80211_rx_status - receive status * diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6e2c8c5236c4..7507f7cdd68c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -258,6 +258,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, pos += 2; if (status->flag & RX_FLAG_HT) { + unsigned int stbc; + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); *pos++ = local->hw.radiotap_mcs_details; *pos = 0; @@ -267,6 +269,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, *pos |= IEEE80211_RADIOTAP_MCS_BW_40; if (status->flag & RX_FLAG_HT_GF) *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; + stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT; + *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; pos++; *pos++ = status->rate_idx; } -- cgit v1.2.3 From 5e4b6f5698421d94226cc2f80eae6d613c9acef8 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Thu, 16 May 2013 20:11:08 +0300 Subject: cfg80211: Allow TDLS peer AID to be configured for VHT VHT uses peer AID in the PARTIAL_AID field in TDLS frames. The current design for TDLS is to first add a dummy STA entry before completing TDLS Setup and then update information on this STA entry based on what was received from the peer during the setup exchange. In theory, this could use NL80211_ATTR_STA_AID to set the peer AID just like this is used in AP mode to set the AID of an association station. However, existing cfg80211 validation rules prevent this attribute from being used with set_station operation. 
To avoid interoperability issues between different kernel and user space version combinations, introduce a new nl80211 attribute for the purpose of setting TDLS peer AID. This attribute can be used in both the new_station and set_station operations. It is not supposed to be allowed to change the AID value during the lifetime of the STA entry, but that validation is left for drivers to do in the change_station callback. Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 7 +++++++ net/wireless/nl80211.c | 11 +++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 06320713e9c9..32b060ea5266 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1431,6 +1431,11 @@ enum nl80211_commands { * @NL80211_ATTR_MAX_CRIT_PROT_DURATION: duration in milliseconds in which * the connection should have increased reliability (u16). * + * @NL80211_ATTR_PEER_AID: Association ID for the peer TDLS station (u16). + * This is similar to @NL80211_ATTR_STA_AID but with a difference of being + * allowed to be used with the first @NL80211_CMD_SET_STATION command to + * update a TDLS peer STA entry. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1729,6 +1734,8 @@ enum nl80211_attrs { NL80211_ATTR_CRIT_PROT_ID, NL80211_ATTR_MAX_CRIT_PROT_DURATION, + NL80211_ATTR_PEER_AID, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5f10f7acfa06..14276af7964b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -378,6 +378,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_MDID] = { .type = NLA_U16 }, [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 }, }; /* policy for the key attributes */ @@ -3872,6 +3873,8 @@ static int nl80211_set_station_tdls(struct genl_info *info, struct station_parameters *params) { /* Dummy STA entry gets updated once the peer capabilities are known */ + if (info->attrs[NL80211_ATTR_PEER_AID]) + params->aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params->ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); @@ -4012,7 +4015,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) return -EINVAL; - if (!info->attrs[NL80211_ATTR_STA_AID]) + if (!info->attrs[NL80211_ATTR_STA_AID] && + !info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); @@ -4023,7 +4027,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); - params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); + if (info->attrs[NL80211_ATTR_STA_AID]) + params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); + else + params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); if (!params.aid || params.aid > IEEE80211_MAX_AID) return -EINVAL; -- cgit v1.2.3 From b422c6cd7e93bb613030f14d7d8a0cc73f115629 Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Fri, 10 May 2013 17:50:51 -0700 Subject: {cfg,mac}80211: move mandatory rates calculation to cfg80211 Move mandatory rates 
calculation to cfg80211, shared with non mac80211 drivers. Signed-off-by: Ashok Nagarajan [extend documentation] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 9 +++++++++ net/mac80211/ibss.c | 10 +++++++--- net/mac80211/ieee80211_i.h | 3 --- net/mac80211/mesh.c | 5 +++-- net/mac80211/util.c | 26 -------------------------- net/wireless/util.c | 23 +++++++++++++++++++++++ 6 files changed, 42 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 32a2f1b20861..58f6302da145 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3005,6 +3005,15 @@ struct ieee80211_rate * ieee80211_get_response_rate(struct ieee80211_supported_band *sband, u32 basic_rates, int bitrate); +/** + * ieee80211_mandatory_rates - get mandatory rates for a given band + * @sband: the band to look for rates in + * + * This function returns a bitmap of the mandatory rates for the given + * band, bits are set according to the rate position in the bitrates array. + */ +u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband); + /* * Radiotap parsing functions -- for controlled injection support * diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 170f9a7fa319..956ba6316da5 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -341,6 +341,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_supported_band *sband; int band; /* @@ -380,8 +381,9 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, sta->last_rx = jiffies; /* make sure mandatory rates are always added */ + sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(local, band); + ieee80211_mandatory_rates(sband); return ieee80211_ibss_finish_sta(sta, auth); } @@ -492,7 +494,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, prev_rates = sta->sta.supp_rates[band]; /* make sure mandatory rates are always added */ sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(local, band); + ieee80211_mandatory_rates(sband); if (sta->sta.supp_rates[band] != prev_rates) { ibss_dbg(sdata, @@ -624,6 +626,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_supported_band *sband; int band; /* @@ -658,8 +661,9 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, sta->last_rx = jiffies; /* make sure mandatory rates are always added */ + sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(local, band); + ieee80211_mandatory_rates(sband); spin_lock(&ifibss->incomplete_lock); list_add(&sta->list, &ifibss->incomplete_stations); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 158e6eb188d3..b7cbd4ebf0e0 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1505,9 +1505,6 @@ static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action, ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0); } -u32 ieee80211_mandatory_rates(struct ieee80211_local *local, - enum ieee80211_band band); - void ieee80211_dynamic_ps_enable_work(struct work_struct *work); void ieee80211_dynamic_ps_disable_work(struct work_struct *work); void 
ieee80211_dynamic_ps_timer(unsigned long data); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index c13db9ad394b..c14bb816c6a3 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -741,6 +741,8 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT; enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + struct ieee80211_supported_band *sband = + sdata->local->hw.wiphy->bands[band]; local->fif_other_bss++; /* mesh ifaces must set allmulti to forward mcast traffic */ @@ -758,8 +760,7 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) sdata->vif.bss_conf.ht_operation_mode = ifmsh->mshcfg.ht_opmode; sdata->vif.bss_conf.enable_beacon = true; - sdata->vif.bss_conf.basic_rates = - ieee80211_mandatory_rates(local, band); + sdata->vif.bss_conf.basic_rates = ieee80211_mandatory_rates(sband); changed |= ieee80211_mps_local_status_update(sdata); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 3f87fa468b1f..707953fd8324 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1072,32 +1072,6 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, ieee80211_set_wmm_default(sdata, true); } -u32 ieee80211_mandatory_rates(struct ieee80211_local *local, - enum ieee80211_band band) -{ - struct ieee80211_supported_band *sband; - struct ieee80211_rate *bitrates; - u32 mandatory_rates; - enum ieee80211_rate_flags mandatory_flag; - int i; - - sband = local->hw.wiphy->bands[band]; - if (WARN_ON(!sband)) - return 1; - - if (band == IEEE80211_BAND_2GHZ) - mandatory_flag = IEEE80211_RATE_MANDATORY_B; - else - mandatory_flag = IEEE80211_RATE_MANDATORY_A; - - bitrates = sband->bitrates; - mandatory_rates = 0; - for (i = 0; i < sband->n_bitrates; i++) - if (bitrates[i].flags & mandatory_flag) - mandatory_rates |= BIT(i); - return mandatory_rates; -} - void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, u16 status, const u8 *extra, size_t extra_len, const u8 *da, diff --git a/net/wireless/util.c b/net/wireless/util.c index b11052be09be..0962f107f57f 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -33,6 +33,29 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband, } EXPORT_SYMBOL(ieee80211_get_response_rate); +u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband) +{ + struct ieee80211_rate *bitrates; + u32 mandatory_rates = 0; + enum ieee80211_rate_flags mandatory_flag; + int i; + + if (WARN_ON(!sband)) + return 1; + + if (sband->band == IEEE80211_BAND_2GHZ) + mandatory_flag = IEEE80211_RATE_MANDATORY_B; + else + mandatory_flag = IEEE80211_RATE_MANDATORY_A; + + bitrates = sband->bitrates; + for (i = 0; i < sband->n_bitrates; i++) + if (bitrates[i].flags & mandatory_flag) + mandatory_rates |= BIT(i); + return mandatory_rates; +} +EXPORT_SYMBOL(ieee80211_mandatory_rates); + int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band) { /* see 802.11 17.3.8.3.2 and Annex J -- cgit v1.2.3 From 5fe231e873729fa2f57cdc417d5c1f80871e2d7d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 8 May 2013 21:45:15 +0200 Subject: cfg80211: vastly simplify locking Virtually all code paths in cfg80211 already (need to) hold the RTNL. As such, there's little point in having another four mutexes for various parts of the code, they just cause lock ordering issues (and much of the time, the RTNL and a few of the others need thus be held.) 
Simplify all this by getting rid of the extra four mutexes and just use the RTNL throughout. Only a few code changes were needed to do this and we can get rid of a work struct for bonus points. Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 5 +- net/wireless/core.c | 166 ++++++++---------------------------------- net/wireless/core.h | 32 +------- net/wireless/debugfs.c | 4 +- net/wireless/ibss.c | 10 +-- net/wireless/mesh.c | 2 - net/wireless/mlme.c | 12 +-- net/wireless/nl80211.c | 178 +++++++++------------------------------------ net/wireless/reg.c | 36 +++------ net/wireless/scan.c | 42 +++++------ net/wireless/sme.c | 26 +------ net/wireless/util.c | 11 --- net/wireless/wext-compat.c | 22 +----- net/wireless/wext-sme.c | 18 ----- 14 files changed, 117 insertions(+), 447 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 58f6302da145..5430f70c63b3 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1257,6 +1257,7 @@ struct cfg80211_ssid { * @scan_start: time (in jiffies) when the scan started * @wdev: the wireless device to scan for * @aborted: (internal) scan request was notified as aborted + * @notified: (internal) scan request was notified as done or aborted * @no_cck: used to send probe requests at non CCK rate in 2GHz band */ struct cfg80211_scan_request { @@ -1274,7 +1275,7 @@ struct cfg80211_scan_request { /* internal */ struct wiphy *wiphy; unsigned long scan_start; - bool aborted; + bool aborted, notified; bool no_cck; /* keep last */ @@ -2874,8 +2875,6 @@ struct wireless_dev { struct mutex mtx; - struct work_struct cleanup_work; - bool use_4addr, p2p_started; u8 address[ETH_ALEN] __aligned(sizeof(u16)); diff --git a/net/wireless/core.c b/net/wireless/core.c index 9416b8f55f50..5fc642d4071b 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -36,12 +36,10 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("wireless configuration support"); MODULE_ALIAS_GENL_FAMILY(NL80211_GENL_NAME); -/* RCU-protected (and cfg80211_mutex for writers) */ +/* RCU-protected (and RTNL for writers) */ LIST_HEAD(cfg80211_rdev_list); int cfg80211_rdev_list_generation; -DEFINE_MUTEX(cfg80211_mutex); - /* for debugfs */ static struct dentry *ieee80211_debugfs_dir; @@ -53,12 +51,11 @@ module_param(cfg80211_disable_40mhz_24ghz, bool, 0644); MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz, "Disable 40MHz support in the 2.4GHz band"); -/* requires cfg80211_mutex to be held! */ struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) { struct cfg80211_registered_device *result = NULL, *rdev; - assert_cfg80211_lock(); + ASSERT_RTNL(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (rdev->wiphy_idx == wiphy_idx) { @@ -77,12 +74,11 @@ int get_wiphy_idx(struct wiphy *wiphy) return rdev->wiphy_idx; } -/* requires cfg80211_rdev_mutex to be held! 
*/ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) { struct cfg80211_registered_device *rdev; - assert_cfg80211_lock(); + ASSERT_RTNL(); rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx); if (!rdev) @@ -90,14 +86,13 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) return &rdev->wiphy; } -/* requires cfg80211_mutex to be held */ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, char *newname) { struct cfg80211_registered_device *rdev2; int wiphy_idx, taken = -1, result, digits; - assert_cfg80211_lock(); + ASSERT_RTNL(); /* prohibit calling the thing phy%d when %d is not its number */ sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); @@ -195,8 +190,7 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { - lockdep_assert_held(&rdev->devlist_mtx); - lockdep_assert_held(&rdev->sched_scan_mtx); + ASSERT_RTNL(); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) return; @@ -235,8 +229,6 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) rtnl_lock(); - /* read-only iteration need not hold the devlist_mtx */ - list_for_each_entry(wdev, &rdev->wdev_list, list) { if (wdev->netdev) { dev_close(wdev->netdev); @@ -245,12 +237,7 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) /* otherwise, check iftype */ switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: - /* but this requires it */ - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); cfg80211_stop_p2p_device(rdev, wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); break; default: break; @@ -278,10 +265,7 @@ static void cfg80211_event_work(struct work_struct *work) event_work); rtnl_lock(); - cfg80211_lock_rdev(rdev); - cfg80211_process_rdev_events(rdev); - cfg80211_unlock_rdev(rdev); rtnl_unlock(); } @@ -323,9 +307,6 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) /* give it a proper name */ dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); - mutex_init(&rdev->mtx); - mutex_init(&rdev->devlist_mtx); - mutex_init(&rdev->sched_scan_mtx); INIT_LIST_HEAD(&rdev->wdev_list); INIT_LIST_HEAD(&rdev->beacon_registrations); spin_lock_init(&rdev->beacon_registrations_lock); @@ -573,11 +554,11 @@ int wiphy_register(struct wiphy *wiphy) /* check and set up bitrates */ ieee80211_set_bitrate_flags(wiphy); - mutex_lock(&cfg80211_mutex); + rtnl_lock(); res = device_add(&rdev->wiphy.dev); if (res) { - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); return res; } @@ -606,25 +587,18 @@ int wiphy_register(struct wiphy *wiphy) } cfg80211_debugfs_rdev_add(rdev); - mutex_unlock(&cfg80211_mutex); - /* - * due to a locking dependency this has to be outside of the - * cfg80211_mutex lock - */ res = rfkill_register(rdev->rfkill); if (res) { device_del(&rdev->wiphy.dev); - mutex_lock(&cfg80211_mutex); debugfs_remove_recursive(rdev->wiphy.debugfsdir); list_del_rcu(&rdev->list); wiphy_regulatory_deregister(wiphy); - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); return res; } - rtnl_lock(); rdev->wiphy.registered = true; rtnl_unlock(); return 0; @@ -654,25 +628,19 @@ void wiphy_unregister(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); - rtnl_lock(); - rdev->wiphy.registered = false; - rtnl_unlock(); - - rfkill_unregister(rdev->rfkill); - - /* protect the device list */ - mutex_lock(&cfg80211_mutex); - wait_event(rdev->dev_wait, ({ int __count; - mutex_lock(&rdev->devlist_mtx); + 
rtnl_lock(); __count = rdev->opencount; - mutex_unlock(&rdev->devlist_mtx); + rtnl_unlock(); __count == 0; })); - mutex_lock(&rdev->devlist_mtx); + rtnl_lock(); + rdev->wiphy.registered = false; + + rfkill_unregister(rdev->rfkill); + BUG_ON(!list_empty(&rdev->wdev_list)); - mutex_unlock(&rdev->devlist_mtx); /* * First remove the hardware from everywhere, this makes @@ -682,20 +650,6 @@ void wiphy_unregister(struct wiphy *wiphy) list_del_rcu(&rdev->list); synchronize_rcu(); - /* - * Try to grab rdev->mtx. If a command is still in progress, - * hopefully the driver will refuse it since it's tearing - * down the device already. We wait for this command to complete - * before unlinking the item from the list. - * Note: as codified by the BUG_ON above we cannot get here if - * a virtual interface is still present. Hence, we can only get - * to lock contention here if userspace issues a command that - * identified the hardware by wiphy index. - */ - cfg80211_lock_rdev(rdev); - /* nothing */ - cfg80211_unlock_rdev(rdev); - /* * If this device got a regulatory hint tell core its * free to listen now to a new shiny device regulatory hint @@ -705,7 +659,7 @@ void wiphy_unregister(struct wiphy *wiphy) cfg80211_rdev_list_generation++; device_del(&rdev->wiphy.dev); - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); flush_work(&rdev->scan_done_wk); cancel_work_sync(&rdev->conn_work); @@ -723,9 +677,6 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) struct cfg80211_internal_bss *scan, *tmp; struct cfg80211_beacon_registration *reg, *treg; rfkill_destroy(rdev->rfkill); - mutex_destroy(&rdev->mtx); - mutex_destroy(&rdev->devlist_mtx); - mutex_destroy(&rdev->sched_scan_mtx); list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) { list_del(®->list); kfree(reg); @@ -750,36 +701,6 @@ void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) } EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); -static void wdev_cleanup_work(struct work_struct *work) -{ - struct wireless_dev *wdev; - struct cfg80211_registered_device *rdev; - - wdev = container_of(work, struct wireless_dev, cleanup_work); - rdev = wiphy_to_dev(wdev->wiphy); - - mutex_lock(&rdev->sched_scan_mtx); - - if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { - rdev->scan_req->aborted = true; - ___cfg80211_scan_done(rdev, true); - } - - if (WARN_ON(rdev->sched_scan_req && - rdev->sched_scan_req->dev == wdev->netdev)) { - __cfg80211_stop_sched_scan(rdev, false); - } - - mutex_unlock(&rdev->sched_scan_mtx); - - mutex_lock(&rdev->devlist_mtx); - rdev->opencount--; - mutex_unlock(&rdev->devlist_mtx); - wake_up(&rdev->dev_wait); - - dev_put(wdev->netdev); -} - void cfg80211_unregister_wdev(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); @@ -789,8 +710,6 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) if (WARN_ON(wdev->netdev)) return; - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); list_del_rcu(&wdev->list); rdev->devlist_generation++; @@ -802,8 +721,6 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) WARN_ON_ONCE(1); break; } - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); } EXPORT_SYMBOL(cfg80211_unregister_wdev); @@ -822,7 +739,7 @@ void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, } void cfg80211_leave(struct cfg80211_registered_device *rdev, - struct wireless_dev *wdev) + struct wireless_dev *wdev) { struct net_device *dev = wdev->netdev; @@ -832,9 +749,7 @@ void 
cfg80211_leave(struct cfg80211_registered_device *rdev, break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: - mutex_lock(&rdev->sched_scan_mtx); __cfg80211_stop_sched_scan(rdev, false); - mutex_unlock(&rdev->sched_scan_mtx); wdev_lock(wdev); #ifdef CONFIG_CFG80211_WEXT @@ -887,13 +802,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, * are added with nl80211. */ mutex_init(&wdev->mtx); - INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work); INIT_LIST_HEAD(&wdev->event_list); spin_lock_init(&wdev->event_lock); INIT_LIST_HEAD(&wdev->mgmt_registrations); spin_lock_init(&wdev->mgmt_registrations_lock); - mutex_lock(&rdev->devlist_mtx); wdev->identifier = ++rdev->wdev_id; list_add_rcu(&wdev->list, &rdev->wdev_list); rdev->devlist_generation++; @@ -906,7 +819,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, } wdev->netdev = dev; wdev->sme_state = CFG80211_SME_IDLE; - mutex_unlock(&rdev->devlist_mtx); #ifdef CONFIG_CFG80211_WEXT wdev->wext.default_key = -1; wdev->wext.default_mgmt_key = -1; @@ -932,26 +844,22 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, break; case NETDEV_DOWN: cfg80211_update_iface_num(rdev, wdev->iftype, -1); - dev_hold(dev); - queue_work(cfg80211_wq, &wdev->cleanup_work); + if (rdev->scan_req && rdev->scan_req->wdev == wdev) { + if (WARN_ON(!rdev->scan_req->notified)) + rdev->scan_req->aborted = true; + ___cfg80211_scan_done(rdev, true); + } + + if (WARN_ON(rdev->sched_scan_req && + rdev->sched_scan_req->dev == wdev->netdev)) { + __cfg80211_stop_sched_scan(rdev, false); + } + + rdev->opencount--; + wake_up(&rdev->dev_wait); break; case NETDEV_UP: - /* - * If we have a really quick DOWN/UP succession we may - * have this work still pending ... cancel it and see - * if it was pending, in which case we need to account - * for some of the work it would have done. - */ - if (cancel_work_sync(&wdev->cleanup_work)) { - mutex_lock(&rdev->devlist_mtx); - rdev->opencount--; - mutex_unlock(&rdev->devlist_mtx); - dev_put(dev); - } cfg80211_update_iface_num(rdev, wdev->iftype, 1); - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); switch (wdev->iftype) { #ifdef CONFIG_CFG80211_WEXT @@ -983,10 +891,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, break; } wdev_unlock(wdev); - mutex_unlock(&rdev->sched_scan_mtx); rdev->opencount++; - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); /* * Configure power management to the driver here so that its @@ -1002,12 +907,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, } break; case NETDEV_UNREGISTER: - /* - * NB: cannot take rdev->mtx here because this may be - * called within code protected by it when interfaces - * are removed with nl80211. - */ - mutex_lock(&rdev->devlist_mtx); /* * It is possible to get NETDEV_UNREGISTER * multiple times. 
To detect that, check @@ -1024,7 +923,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, kfree(wdev->wext.keys); #endif } - mutex_unlock(&rdev->devlist_mtx); /* * synchronise (so that we won't find this netdev * from other code any more) and then clear the list @@ -1044,9 +942,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, return notifier_from_errno(-EOPNOTSUPP); if (rfkill_blocked(rdev->rfkill)) return notifier_from_errno(-ERFKILL); - mutex_lock(&rdev->devlist_mtx); ret = cfg80211_can_add_interface(rdev, wdev->iftype); - mutex_unlock(&rdev->devlist_mtx); if (ret) return notifier_from_errno(ret); break; @@ -1064,12 +960,10 @@ static void __net_exit cfg80211_pernet_exit(struct net *net) struct cfg80211_registered_device *rdev; rtnl_lock(); - mutex_lock(&cfg80211_mutex); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (net_eq(wiphy_net(&rdev->wiphy), net)) WARN_ON(cfg80211_switch_netns(rdev, &init_net)); } - mutex_unlock(&cfg80211_mutex); rtnl_unlock(); } diff --git a/net/wireless/core.h b/net/wireless/core.h index 95b29075a9c8..d21a0fc01401 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -5,7 +5,6 @@ */ #ifndef __NET_WIRELESS_CORE_H #define __NET_WIRELESS_CORE_H -#include #include #include #include @@ -23,11 +22,6 @@ struct cfg80211_registered_device { const struct cfg80211_ops *ops; struct list_head list; - /* we hold this mutex during any call so that - * we cannot do multiple calls at once, and also - * to avoid the deregister call to proceed while - * any call is in progress */ - struct mutex mtx; /* rfkill support */ struct rfkill_ops rfkill_ops; @@ -49,9 +43,7 @@ struct cfg80211_registered_device { /* wiphy index, internal only */ int wiphy_idx; - /* associated wireless interfaces */ - struct mutex devlist_mtx; - /* protected by devlist_mtx or RCU */ + /* associated wireless interfaces, protected by rtnl or RCU */ struct list_head wdev_list; int devlist_generation, wdev_id; int opencount; /* also protected by devlist_mtx */ @@ -75,8 +67,6 @@ struct cfg80211_registered_device { struct work_struct scan_done_wk; struct work_struct sched_scan_results_wk; - struct mutex sched_scan_mtx; - #ifdef CONFIG_NL80211_TESTMODE struct genl_info *testmode_info; #endif @@ -120,15 +110,9 @@ cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev) } extern struct workqueue_struct *cfg80211_wq; -extern struct mutex cfg80211_mutex; extern struct list_head cfg80211_rdev_list; extern int cfg80211_rdev_list_generation; -static inline void assert_cfg80211_lock(void) -{ - lockdep_assert_held(&cfg80211_mutex); -} - struct cfg80211_internal_bss { struct list_head list; struct list_head hidden_list; @@ -161,23 +145,11 @@ static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss) struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx); int get_wiphy_idx(struct wiphy *wiphy); -/* requires cfg80211_rdev_mutex to be held! 
*/ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, struct net *net); -static inline void cfg80211_lock_rdev(struct cfg80211_registered_device *rdev) -{ - mutex_lock(&rdev->mtx); -} - -static inline void cfg80211_unlock_rdev(struct cfg80211_registered_device *rdev) -{ - BUG_ON(IS_ERR(rdev) || !rdev); - mutex_unlock(&rdev->mtx); -} - static inline void wdev_lock(struct wireless_dev *wdev) __acquires(wdev) { @@ -192,7 +164,7 @@ static inline void wdev_unlock(struct wireless_dev *wdev) mutex_unlock(&wdev->mtx); } -#define ASSERT_RDEV_LOCK(rdev) lockdep_assert_held(&(rdev)->mtx) +#define ASSERT_RDEV_LOCK(rdev) ASSERT_RTNL() #define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx) static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev) diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 920cabe0461b..90d050036624 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -74,7 +74,7 @@ static ssize_t ht40allow_map_read(struct file *file, if (!buf) return -ENOMEM; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); for (band = 0; band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; @@ -85,7 +85,7 @@ static ssize_t ht40allow_map_read(struct file *file, buf, buf_size, offset); } - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); r = simple_read_from_buffer(user_buf, count, ppos, buf, offset); diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index d80e47194d49..5449c5a6de84 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -152,11 +152,11 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev = dev->ieee80211_ptr; int err; - mutex_lock(&rdev->devlist_mtx); + ASSERT_RTNL(); + wdev_lock(wdev); err = __cfg80211_join_ibss(rdev, dev, params, connkeys); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -359,11 +359,9 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev, wdev->wext.ibss.channel_fixed = false; } - mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -429,11 +427,9 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev, memcpy(wdev->wext.ibss.ssid, ssid, len); wdev->wext.ibss.ssid_len = len; - mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -512,11 +508,9 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev, } else wdev->wext.ibss.bssid = NULL; - mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 9546ad210550..5dfb289ab761 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -186,11 +186,9 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev = dev->ieee80211_ptr; int err; - mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = __cfg80211_join_mesh(rdev, dev, setup, conf); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index c21e32f9549c..68b40f21bc38 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -313,14 +313,14 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, { int err; - mutex_lock(&rdev->devlist_mtx); + ASSERT_RTNL(); + 
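/*
 * Not a hunk from this patch -- a note on the locking convention this
 * series converges on: the RTNL is the outer lock and now protects the
 * rdev and wdev lists (replacing cfg80211_mutex, devlist_mtx and
 * sched_scan_mtx), while the per-wdev mutex taken just below stays the
 * inner lock for the wdev's own SME/connection state.
 */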
wdev_lock(dev->ieee80211_ptr); err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, ssid, ssid_len, ie, ie_len, key, key_len, key_idx, sae_data, sae_data_len); wdev_unlock(dev->ieee80211_ptr); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -424,12 +424,12 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev = dev->ieee80211_ptr; int err; - mutex_lock(&rdev->devlist_mtx); + ASSERT_RTNL(); + wdev_lock(wdev); err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, ssid, ssid_len, req); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -844,7 +844,7 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) dfs_update_channels_wk); wiphy = &rdev->wiphy; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); for (bandid = 0; bandid < IEEE80211_NUM_BANDS; bandid++) { sband = wiphy->bands[bandid]; if (!sband) @@ -877,7 +877,7 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) check_again = true; } } - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); /* reschedule if there are other channels waiting to be cleared again */ if (check_again) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5bcf3a5b6465..74cdb1a0cf31 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -59,7 +59,7 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) int wiphy_idx = -1; int ifidx = -1; - assert_cfg80211_lock(); + ASSERT_RTNL(); if (!have_ifidx && !have_wdev_id) return ERR_PTR(-EINVAL); @@ -80,7 +80,6 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) if (have_wdev_id && rdev->wiphy_idx != wiphy_idx) continue; - mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { if (have_ifidx && wdev->netdev && wdev->netdev->ifindex == ifidx) { @@ -92,7 +91,6 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) break; } } - mutex_unlock(&rdev->devlist_mtx); if (result) break; @@ -109,7 +107,7 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) struct cfg80211_registered_device *rdev = NULL, *tmp; struct net_device *netdev; - assert_cfg80211_lock(); + ASSERT_RTNL(); if (!attrs[NL80211_ATTR_WIPHY] && !attrs[NL80211_ATTR_IFINDEX] && @@ -128,14 +126,12 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32); if (tmp) { /* make sure wdev exists */ - mutex_lock(&tmp->devlist_mtx); list_for_each_entry(wdev, &tmp->wdev_list, list) { if (wdev->identifier != (u32)wdev_id) continue; found = true; break; } - mutex_unlock(&tmp->devlist_mtx); if (!found) tmp = NULL; @@ -182,19 +178,6 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) /* * This function returns a pointer to the driver * that the genl_info item that is passed refers to. - * If successful, it returns non-NULL and also locks - * the driver's mutex! - * - * This means that you need to call cfg80211_unlock_rdev() - * before being allowed to acquire &cfg80211_mutex! - * - * This is necessary because we need to lock the global - * mutex to get an item off the list safely, and then - * we lock the rdev mutex so it doesn't go away under us. - * - * We don't want to keep cfg80211_mutex locked - * for all the time in order to allow requests on - * other interfaces to go through at the same time. * * The result of this can be a PTR_ERR and hence must * be checked with IS_ERR() for errors. 
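For illustration only, not part of the patch: with rdev->mtx gone, a caller of the simplified helper below just checks the IS_ERR() result and relies on the RTNL it already holds (nl80211 takes it via NL80211_FLAG_NEED_RTNL). The handler name is invented; assume the sketch sits in net/wireless/nl80211.c.

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;

	ASSERT_RTNL();	/* list lookups are now protected by the RTNL */

	rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);

	/* no rdev->mtx to take or drop; holding the RTNL keeps rdev registered */
	return 0;
}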
@@ -202,20 +185,7 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) static struct cfg80211_registered_device * cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info) { - struct cfg80211_registered_device *rdev; - - mutex_lock(&cfg80211_mutex); - rdev = __cfg80211_rdev_from_attrs(netns, info->attrs); - - /* if it is not an error we grab the lock on - * it to assure it won't be going away while - * we operate on it */ - if (!IS_ERR(rdev)) - mutex_lock(&rdev->mtx); - - mutex_unlock(&cfg80211_mutex); - - return rdev; + return __cfg80211_rdev_from_attrs(netns, info->attrs); } /* policy for the attributes */ @@ -456,7 +426,6 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, int err; rtnl_lock(); - mutex_lock(&cfg80211_mutex); if (!cb->args[0]) { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, @@ -485,14 +454,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, *rdev = wiphy_to_dev(wiphy); *wdev = NULL; - mutex_lock(&(*rdev)->devlist_mtx); list_for_each_entry(tmp, &(*rdev)->wdev_list, list) { if (tmp->identifier == cb->args[1]) { *wdev = tmp; break; } } - mutex_unlock(&(*rdev)->devlist_mtx); if (!*wdev) { err = -ENODEV; @@ -500,19 +467,14 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, } } - cfg80211_lock_rdev(*rdev); - - mutex_unlock(&cfg80211_mutex); return 0; out_unlock: - mutex_unlock(&cfg80211_mutex); rtnl_unlock(); return err; } static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev) { - cfg80211_unlock_rdev(rdev); rtnl_unlock(); } @@ -1568,7 +1530,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) struct nlattr **tb = nl80211_fam.attrbuf; int res; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, tb, nl80211_fam.maxattr, nl80211_policy); if (res == 0) { @@ -1582,10 +1544,8 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]); netdev = dev_get_by_index(sock_net(skb->sk), ifidx); - if (!netdev) { - mutex_unlock(&cfg80211_mutex); + if (!netdev) return -ENODEV; - } if (netdev->ieee80211_ptr) { dev = wiphy_to_dev( netdev->ieee80211_ptr->wiphy); @@ -1629,7 +1589,6 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) !skb->len && cb->min_dump_alloc < 4096) { cb->min_dump_alloc = 4096; - mutex_unlock(&cfg80211_mutex); return 1; } idx--; @@ -1638,7 +1597,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) } while (cb->args[1] > 0); break; } - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); cb->args[0] = idx; @@ -1793,7 +1752,6 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, if (result) return result; - mutex_lock(&rdev->devlist_mtx); switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: @@ -1817,7 +1775,6 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, default: result = -EINVAL; } - mutex_unlock(&rdev->devlist_mtx); return result; } @@ -1866,6 +1823,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) u32 frag_threshold = 0, rts_threshold = 0; u8 coverage_class = 0; + ASSERT_RTNL(); + /* * Try to find the wiphy and netdev. 
Normally this * function shouldn't need the netdev, but this is @@ -1875,31 +1834,25 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) * also passed a netdev to set_wiphy, so that it is * possible to let that go to the right netdev! */ - mutex_lock(&cfg80211_mutex); if (info->attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); netdev = dev_get_by_index(genl_info_net(info), ifindex); - if (netdev && netdev->ieee80211_ptr) { + if (netdev && netdev->ieee80211_ptr) rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy); - mutex_lock(&rdev->mtx); - } else + else netdev = NULL; } if (!netdev) { rdev = __cfg80211_rdev_from_attrs(genl_info_net(info), info->attrs); - if (IS_ERR(rdev)) { - mutex_unlock(&cfg80211_mutex); + if (IS_ERR(rdev)) return PTR_ERR(rdev); - } wdev = NULL; netdev = NULL; result = 0; - - mutex_lock(&rdev->mtx); } else wdev = netdev->ieee80211_ptr; @@ -1912,8 +1865,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) result = cfg80211_dev_rename( rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); - mutex_unlock(&cfg80211_mutex); - if (result) goto bad_res; @@ -2120,7 +2071,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) } bad_res: - mutex_unlock(&rdev->mtx); if (netdev) dev_put(netdev); return result; @@ -2218,7 +2168,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk))) continue; @@ -2228,7 +2178,6 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * } if_idx = 0; - mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { if (if_idx < if_start) { if_idx++; @@ -2237,17 +2186,15 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev) < 0) { - mutex_unlock(&rdev->devlist_mtx); goto out; } if_idx++; } - mutex_unlock(&rdev->devlist_mtx); wp_idx++; } out: - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); cb->args[0] = wp_idx; cb->args[1] = if_idx; @@ -2480,11 +2427,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) INIT_LIST_HEAD(&wdev->mgmt_registrations); spin_lock_init(&wdev->mgmt_registrations_lock); - mutex_lock(&rdev->devlist_mtx); wdev->identifier = ++rdev->wdev_id; list_add_rcu(&wdev->list, &rdev->wdev_list); rdev->devlist_generation++; - mutex_unlock(&rdev->devlist_mtx); break; default: break; @@ -2993,8 +2938,6 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev; bool ret = false; - mutex_lock(&rdev->devlist_mtx); - list_for_each_entry(wdev, &rdev->wdev_list, list) { if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) @@ -3008,8 +2951,6 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, break; } - mutex_unlock(&rdev->devlist_mtx); - return ret; } @@ -3171,13 +3112,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) params.radar_required = true; } - mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype, params.chandef.chan, CHAN_MODE_SHARED, radar_detect_width); - mutex_unlock(&rdev->devlist_mtx); - if (err) return 
err; @@ -4914,18 +4852,13 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) void *hdr = NULL; struct nlattr *nl_reg_rules; unsigned int i; - int err = -EINVAL; - - mutex_lock(&cfg80211_mutex); if (!cfg80211_regdomain) - goto out; + return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) { - err = -ENOBUFS; - goto out; - } + if (!msg) + return -ENOBUFS; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_REG); @@ -4984,8 +4917,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) nla_nest_end(msg, nl_reg_rules); genlmsg_end(msg, hdr); - err = genlmsg_reply(msg, info); - goto out; + return genlmsg_reply(msg, info); nla_put_failure_rcu: rcu_read_unlock(); @@ -4993,10 +4925,7 @@ nla_put_failure: genlmsg_cancel(msg, hdr); put_failure: nlmsg_free(msg); - err = -EMSGSIZE; -out: - mutex_unlock(&cfg80211_mutex); - return err; + return -EMSGSIZE; } static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) @@ -5062,12 +4991,9 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) } } - mutex_lock(&cfg80211_mutex); - r = set_regdom(rd); /* set_regdom took ownership */ rd = NULL; - mutex_unlock(&cfg80211_mutex); bad_reg: kfree(rd); @@ -5117,7 +5043,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->scan) return -EOPNOTSUPP; - mutex_lock(&rdev->sched_scan_mtx); if (rdev->scan_req) { err = -EBUSY; goto unlock; @@ -5303,7 +5228,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) } unlock: - mutex_unlock(&rdev->sched_scan_mtx); return err; } @@ -5375,8 +5299,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, if (ie_len > wiphy->max_sched_scan_ie_len) return -EINVAL; - mutex_lock(&rdev->sched_scan_mtx); - if (rdev->sched_scan_req) { err = -EINPROGRESS; goto out; @@ -5544,7 +5466,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, out_free: kfree(request); out: - mutex_unlock(&rdev->sched_scan_mtx); return err; } @@ -5552,17 +5473,12 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; - int err; if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || !rdev->ops->sched_scan_stop) return -EOPNOTSUPP; - mutex_lock(&rdev->sched_scan_mtx); - err = __cfg80211_stop_sched_scan(rdev, false); - mutex_unlock(&rdev->sched_scan_mtx); - - return err; + return __cfg80211_stop_sched_scan(rdev, false); } static int nl80211_start_radar_detection(struct sk_buff *skb, @@ -5594,12 +5510,11 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, if (!rdev->ops->start_radar_detection) return -EOPNOTSUPP; - mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype, chandef.chan, CHAN_MODE_SHARED, BIT(chandef.width)); if (err) - goto err_locked; + return err; err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef); if (!err) { @@ -5607,9 +5522,6 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, wdev->cac_started = true; wdev->cac_start_time = jiffies; } -err_locked: - mutex_unlock(&rdev->devlist_mtx); - return err; } @@ -6472,6 +6384,8 @@ static int nl80211_testmode_dump(struct sk_buff *skb, void *data = NULL; int data_len = 0; + rtnl_lock(); + if (cb->args[0]) { /* * 0 is a valid index, but not valid for args[0], @@ -6483,18 +6397,16 @@ static int nl80211_testmode_dump(struct sk_buff *skb, nl80211_fam.attrbuf, nl80211_fam.maxattr, 
nl80211_policy); if (err) - return err; + goto out_err; - mutex_lock(&cfg80211_mutex); rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), nl80211_fam.attrbuf); if (IS_ERR(rdev)) { - mutex_unlock(&cfg80211_mutex); - return PTR_ERR(rdev); + err = PTR_ERR(rdev); + goto out_err; } phy_idx = rdev->wiphy_idx; rdev = NULL; - mutex_unlock(&cfg80211_mutex); if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA]) cb->args[1] = @@ -6506,14 +6418,11 @@ static int nl80211_testmode_dump(struct sk_buff *skb, data_len = nla_len((void *)cb->args[1]); } - mutex_lock(&cfg80211_mutex); rdev = cfg80211_rdev_by_wiphy_idx(phy_idx); if (!rdev) { - mutex_unlock(&cfg80211_mutex); - return -ENOENT; + err = -ENOENT; + goto out_err; } - cfg80211_lock_rdev(rdev); - mutex_unlock(&cfg80211_mutex); if (!rdev->ops->testmode_dump) { err = -EOPNOTSUPP; @@ -6554,7 +6463,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb, /* see above */ cb->args[0] = phy_idx + 1; out_err: - cfg80211_unlock_rdev(rdev); + rtnl_unlock(); return err; } @@ -8189,9 +8098,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) if (wdev->p2p_started) return 0; - mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_add_interface(rdev, wdev->iftype); - mutex_unlock(&rdev->devlist_mtx); if (err) return err; @@ -8200,9 +8107,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) return err; wdev->p2p_started = true; - mutex_lock(&rdev->devlist_mtx); rdev->opencount++; - mutex_unlock(&rdev->devlist_mtx); return 0; } @@ -8218,11 +8123,7 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->stop_p2p_device) return -EOPNOTSUPP; - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); cfg80211_stop_p2p_device(rdev, wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); return 0; } @@ -8365,11 +8266,11 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, info->user_ptr[0] = rdev; } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV || ops->internal_flags & NL80211_FLAG_NEED_WDEV) { - mutex_lock(&cfg80211_mutex); + ASSERT_RTNL(); + wdev = __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs); if (IS_ERR(wdev)) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return PTR_ERR(wdev); @@ -8380,7 +8281,6 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) { if (!dev) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return -EINVAL; @@ -8394,7 +8294,6 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, if (dev) { if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP && !netif_running(dev)) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return -ENETDOWN; @@ -8403,17 +8302,12 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, dev_hold(dev); } else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) { if (!wdev->p2p_started) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return -ENETDOWN; } } - cfg80211_lock_rdev(rdev); - - mutex_unlock(&cfg80211_mutex); - info->user_ptr[0] = rdev; } @@ -8423,8 +8317,6 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { - if (info->user_ptr[0]) - cfg80211_unlock_rdev(info->user_ptr[0]); if (info->user_ptr[1]) { if (ops->internal_flags & NL80211_FLAG_NEED_WDEV) { struct 
wireless_dev *wdev = info->user_ptr[1]; @@ -8446,7 +8338,8 @@ static struct genl_ops nl80211_ops[] = { .dumpit = nl80211_dump_wiphy, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ - .internal_flags = NL80211_FLAG_NEED_WIPHY, + .internal_flags = NL80211_FLAG_NEED_WIPHY | + NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_WIPHY, @@ -8461,7 +8354,8 @@ static struct genl_ops nl80211_ops[] = { .dumpit = nl80211_dump_interface, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ - .internal_flags = NL80211_FLAG_NEED_WDEV, + .internal_flags = NL80211_FLAG_NEED_WDEV | + NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_INTERFACE, @@ -8620,6 +8514,7 @@ static struct genl_ops nl80211_ops[] = { .cmd = NL80211_CMD_GET_REG, .doit = nl80211_get_reg, .policy = nl80211_policy, + .internal_flags = NL80211_FLAG_NEED_RTNL, /* can be retrieved by unprivileged users */ }, { @@ -8627,6 +8522,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_set_reg, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_REQ_SET_REG, @@ -9082,8 +8978,6 @@ static int nl80211_add_scan_req(struct sk_buff *msg, struct nlattr *nest; int i; - lockdep_assert_held(&rdev->sched_scan_mtx); - if (WARN_ON(!req)) return 0; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index cc35fbaa4578..e76559618588 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -377,7 +377,7 @@ static void reg_regdb_search(struct work_struct *work) const struct ieee80211_regdomain *curdom, *regdom = NULL; int i; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); mutex_lock(®_regdb_search_mutex); while (!list_empty(®_regdb_search_list)) { @@ -402,7 +402,7 @@ static void reg_regdb_search(struct work_struct *work) if (!IS_ERR_OR_NULL(regdom)) set_regdom(regdom); - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); } static DECLARE_WORK(reg_regdb_work, reg_regdb_search); @@ -1225,7 +1225,7 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) struct cfg80211_registered_device *rdev; struct wiphy *wiphy; - assert_cfg80211_lock(); + ASSERT_RTNL(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { wiphy = &rdev->wiphy; @@ -1570,21 +1570,19 @@ static void reg_process_pending_hints(void) { struct regulatory_request *reg_request, *lr; - mutex_lock(&cfg80211_mutex); - mutex_lock(®_mutex); lr = get_last_request(); /* When last_request->processed becomes true this will be rescheduled */ if (lr && !lr->processed) { REG_DBG_PRINT("Pending regulatory request, waiting for it to be processed...\n"); - goto out; + return; } spin_lock(®_requests_lock); if (list_empty(®_requests_list)) { spin_unlock(®_requests_lock); - goto out; + return; } reg_request = list_first_entry(®_requests_list, @@ -1595,10 +1593,6 @@ static void reg_process_pending_hints(void) spin_unlock(®_requests_lock); reg_process_hint(reg_request, reg_request->initiator); - -out: - mutex_unlock(®_mutex); - mutex_unlock(&cfg80211_mutex); } /* Processes beacon hints -- this has nothing to do with country IEs */ @@ -1607,9 +1601,6 @@ static void reg_process_pending_beacon_hints(void) struct cfg80211_registered_device *rdev; struct reg_beacon *pending_beacon, *tmp; - mutex_lock(&cfg80211_mutex); - mutex_lock(®_mutex); - /* This goes through the _pending_ beacon list */ spin_lock_bh(®_pending_beacons_lock); @@ -1626,14 +1617,16 @@ static void reg_process_pending_beacon_hints(void) } spin_unlock_bh(®_pending_beacons_lock); - mutex_unlock(®_mutex); - 
mutex_unlock(&cfg80211_mutex); } static void reg_todo(struct work_struct *work) { + rtnl_lock(); + mutex_lock(®_mutex); reg_process_pending_hints(); reg_process_pending_beacon_hints(); + mutex_unlock(®_mutex); + rtnl_unlock(); } static void queue_regulatory_request(struct regulatory_request *request) @@ -1717,10 +1710,6 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2) } EXPORT_SYMBOL(regulatory_hint); -/* - * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and - * therefore cannot iterate over the rdev list here. - */ void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band, const u8 *country_ie, u8 country_ie_len) { @@ -1752,7 +1741,7 @@ void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band, /* * We will run this only upon a successful connection on cfg80211. * We leave conflict resolution to the workqueue, where can hold - * cfg80211_mutex. + * the RTNL. */ if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && lr->wiphy_idx != WIPHY_IDX_INVALID) @@ -1858,7 +1847,8 @@ static void restore_regulatory_settings(bool reset_user) LIST_HEAD(tmp_reg_req_list); struct cfg80211_registered_device *rdev; - mutex_lock(&cfg80211_mutex); + ASSERT_RTNL(); + mutex_lock(®_mutex); reset_regdomains(true, &world_regdom); @@ -1915,7 +1905,6 @@ static void restore_regulatory_settings(bool reset_user) spin_unlock(®_requests_lock); mutex_unlock(®_mutex); - mutex_unlock(&cfg80211_mutex); REG_DBG_PRINT("Kicking the queue\n"); @@ -2297,7 +2286,6 @@ void wiphy_regulatory_register(struct wiphy *wiphy) mutex_unlock(®_mutex); } -/* Caller must hold cfg80211_mutex */ void wiphy_regulatory_deregister(struct wiphy *wiphy) { struct wiphy *request_wiphy = NULL; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 2ce44a712f13..dd01b58fa78c 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) union iwreq_data wrqu; #endif - lockdep_assert_held(&rdev->sched_scan_mtx); + ASSERT_RTNL(); request = rdev->scan_req; @@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk) rdev = container_of(wk, struct cfg80211_registered_device, scan_done_wk); - mutex_lock(&rdev->sched_scan_mtx); + rtnl_lock(); ___cfg80211_scan_done(rdev, false); - mutex_unlock(&rdev->sched_scan_mtx); + rtnl_unlock(); } void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) @@ -241,6 +241,7 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); request->aborted = aborted; + request->notified = true; queue_work(cfg80211_wq, &wiphy_to_dev(request->wiphy)->scan_done_wk); } EXPORT_SYMBOL(cfg80211_scan_done); @@ -255,7 +256,7 @@ void __cfg80211_sched_scan_results(struct work_struct *wk) request = rdev->sched_scan_req; - mutex_lock(&rdev->sched_scan_mtx); + rtnl_lock(); /* we don't have sched_scan_req anymore if the scan is stopping */ if (request) { @@ -270,7 +271,7 @@ void __cfg80211_sched_scan_results(struct work_struct *wk) nl80211_send_sched_scan_results(rdev, request->dev); } - mutex_unlock(&rdev->sched_scan_mtx); + rtnl_unlock(); } void cfg80211_sched_scan_results(struct wiphy *wiphy) @@ -289,9 +290,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy) trace_cfg80211_sched_scan_stopped(wiphy); - mutex_lock(&rdev->sched_scan_mtx); + rtnl_lock(); __cfg80211_stop_sched_scan(rdev, true); - mutex_unlock(&rdev->sched_scan_mtx); + rtnl_unlock(); } 
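A hypothetical driver-side view of the scan entry points touched above (sketch only; example_priv and its scan_req field are invented, the cfg80211 calls are real): cfg80211_scan_done() still just marks the request and queues scan_done_wk, and the new ->notified flag lets the NETDEV_DOWN path warn if an interface goes down before the driver reported completion; cfg80211_sched_scan_stopped() now takes the RTNL itself, so it must not be called with the RTNL already held.

struct example_priv {
	struct wiphy *wiphy;
	struct cfg80211_scan_request *scan_req;	/* invented field */
};

static void example_scan_complete(struct example_priv *priv, bool aborted)
{
	struct cfg80211_scan_request *req = priv->scan_req;

	priv->scan_req = NULL;
	/* sets req->notified, req->aborted and queues scan_done_wk;
	 * ___cfg80211_scan_done() then runs later under the RTNL */
	cfg80211_scan_done(req, aborted);
}

static void example_sched_scan_gone(struct example_priv *priv)
{
	/* acquires the RTNL internally -- don't call under rtnl_lock() */
	cfg80211_sched_scan_stopped(priv->wiphy);
}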
EXPORT_SYMBOL(cfg80211_sched_scan_stopped); @@ -300,7 +301,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, { struct net_device *dev; - lockdep_assert_held(&rdev->sched_scan_mtx); + ASSERT_RTNL(); if (!rdev->sched_scan_req) return -ENOENT; @@ -1043,21 +1044,19 @@ EXPORT_SYMBOL(cfg80211_unlink_bss); static struct cfg80211_registered_device * cfg80211_get_dev_from_ifindex(struct net *net, int ifindex) { - struct cfg80211_registered_device *rdev = ERR_PTR(-ENODEV); + struct cfg80211_registered_device *rdev; struct net_device *dev; - mutex_lock(&cfg80211_mutex); + ASSERT_RTNL(); + dev = dev_get_by_index(net, ifindex); if (!dev) - goto out; - if (dev->ieee80211_ptr) { + return ERR_PTR(-ENODEV); + if (dev->ieee80211_ptr) rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); - mutex_lock(&rdev->mtx); - } else + else rdev = ERR_PTR(-ENODEV); dev_put(dev); - out: - mutex_unlock(&cfg80211_mutex); return rdev; } @@ -1083,7 +1082,6 @@ int cfg80211_wext_siwscan(struct net_device *dev, if (IS_ERR(rdev)) return PTR_ERR(rdev); - mutex_lock(&rdev->sched_scan_mtx); if (rdev->scan_req) { err = -EBUSY; goto out; @@ -1190,9 +1188,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, dev_hold(dev); } out: - mutex_unlock(&rdev->sched_scan_mtx); kfree(creq); - cfg80211_unlock_rdev(rdev); return err; } EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan); @@ -1491,10 +1487,8 @@ int cfg80211_wext_giwscan(struct net_device *dev, if (IS_ERR(rdev)) return PTR_ERR(rdev); - if (rdev->scan_req) { - res = -EAGAIN; - goto out; - } + if (rdev->scan_req) + return -EAGAIN; res = ieee80211_scan_results(rdev, info, extra, data->length); data->length = 0; @@ -1503,8 +1497,6 @@ int cfg80211_wext_giwscan(struct net_device *dev, res = 0; } - out: - cfg80211_unlock_rdev(rdev); return res; } EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 3ed35c345cae..4dbf31407a56 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -43,35 +43,29 @@ static bool cfg80211_is_all_idle(void) struct wireless_dev *wdev; bool is_all_idle = true; - mutex_lock(&cfg80211_mutex); - /* * All devices must be idle as otherwise if you are actively * scanning some new beacon hints could be learned and would * count as new regulatory hints. 
*/ list_for_each_entry(rdev, &cfg80211_rdev_list, list) { - cfg80211_lock_rdev(rdev); list_for_each_entry(wdev, &rdev->wdev_list, list) { wdev_lock(wdev); if (wdev->sme_state != CFG80211_SME_IDLE) is_all_idle = false; wdev_unlock(wdev); } - cfg80211_unlock_rdev(rdev); } - mutex_unlock(&cfg80211_mutex); - return is_all_idle; } static void disconnect_work(struct work_struct *work) { - if (!cfg80211_is_all_idle()) - return; - - regulatory_hint_disconnect(); + rtnl_lock(); + if (cfg80211_is_all_idle()) + regulatory_hint_disconnect(); + rtnl_unlock(); } static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); @@ -85,7 +79,6 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) ASSERT_RTNL(); ASSERT_RDEV_LOCK(rdev); ASSERT_WDEV_LOCK(wdev); - lockdep_assert_held(&rdev->sched_scan_mtx); if (rdev->scan_req) return -EBUSY; @@ -226,9 +219,6 @@ void cfg80211_conn_work(struct work_struct *work) u8 bssid_buf[ETH_ALEN], *bssid = NULL; rtnl_lock(); - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { if (!wdev->netdev) @@ -256,9 +246,6 @@ void cfg80211_conn_work(struct work_struct *work) wdev_unlock(wdev); } - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); rtnl_unlock(); } @@ -931,14 +918,9 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, { int err; - mutex_lock(&rdev->devlist_mtx); - /* might request scan - scan_mtx -> wdev_mtx dependency */ - mutex_lock(&rdev->sched_scan_mtx); wdev_lock(dev->ieee80211_ptr); err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); wdev_unlock(dev->ieee80211_ptr); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); return err; } diff --git a/net/wireless/util.c b/net/wireless/util.c index 0962f107f57f..501724257af5 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -808,12 +808,8 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev) ASSERT_RTNL(); ASSERT_RDEV_LOCK(rdev); - mutex_lock(&rdev->devlist_mtx); - list_for_each_entry(wdev, &rdev->wdev_list, list) cfg80211_process_wdev_events(wdev); - - mutex_unlock(&rdev->devlist_mtx); } int cfg80211_change_iface(struct cfg80211_registered_device *rdev, @@ -845,10 +841,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, return -EBUSY; if (ntype != otype && netif_running(dev)) { - mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr, ntype); - mutex_unlock(&rdev->devlist_mtx); if (err) return err; @@ -1210,8 +1204,6 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, if (!beacon_int) return -EINVAL; - mutex_lock(&rdev->devlist_mtx); - list_for_each_entry(wdev, &rdev->wdev_list, list) { if (!wdev->beacon_interval) continue; @@ -1221,8 +1213,6 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, } } - mutex_unlock(&rdev->devlist_mtx); - return res; } @@ -1246,7 +1236,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, int i, j; ASSERT_RTNL(); - lockdep_assert_held(&rdev->devlist_mtx); if (WARN_ON(hweight32(radar_detect) > 1)) return -EINVAL; diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index d997d0f0c54a..e7c6e862580d 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -72,7 +72,6 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, struct cfg80211_registered_device *rdev; struct 
vif_params vifparams; enum nl80211_iftype type; - int ret; rdev = wiphy_to_dev(wdev->wiphy); @@ -98,11 +97,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, memset(&vifparams, 0, sizeof(vifparams)); - cfg80211_lock_rdev(rdev); - ret = cfg80211_change_iface(rdev, dev, type, NULL, &vifparams); - cfg80211_unlock_rdev(rdev); - - return ret; + return cfg80211_change_iface(rdev, dev, type, NULL, &vifparams); } EXPORT_SYMBOL_GPL(cfg80211_wext_siwmode); @@ -579,13 +574,10 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev, { int err; - /* devlist mutex needed for possible IBSS re-join */ - mutex_lock(&rdev->devlist_mtx); wdev_lock(dev->ieee80211_ptr); err = __cfg80211_set_encryption(rdev, dev, pairwise, addr, remove, tx_key, idx, params); wdev_unlock(dev->ieee80211_ptr); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -787,7 +779,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev, struct cfg80211_chan_def chandef = { .width = NL80211_CHAN_WIDTH_20_NOHT, }; - int freq, err; + int freq; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: @@ -804,10 +796,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev, chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); if (!chandef.chan) return -EINVAL; - mutex_lock(&rdev->devlist_mtx); - err = cfg80211_set_monitor_channel(rdev, &chandef); - mutex_unlock(&rdev->devlist_mtx); - return err; + return cfg80211_set_monitor_channel(rdev, &chandef); case NL80211_IFTYPE_MESH_POINT: freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); if (freq < 0) @@ -818,10 +807,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev, chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); if (!chandef.chan) return -EINVAL; - mutex_lock(&rdev->devlist_mtx); - err = cfg80211_set_mesh_channel(rdev, wdev, &chandef); - mutex_unlock(&rdev->devlist_mtx); - return err; + return cfg80211_set_mesh_channel(rdev, wdev, &chandef); default: return -EOPNOTSUPP; } diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index e79cb5c0655a..aeefd6817189 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -87,9 +87,6 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, return -EINVAL; } - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); if (wdev->sme_state != CFG80211_SME_IDLE) { @@ -136,9 +133,6 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); return err; } @@ -190,9 +184,6 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, if (len > 0 && ssid[len - 1] == '\0') len--; - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); err = 0; @@ -226,9 +217,6 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); return err; } @@ -287,9 +275,6 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) bssid = NULL; - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); if (wdev->sme_state != CFG80211_SME_IDLE) { @@ -318,9 +303,6 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, err = 
cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); return err; } -- cgit v1.2.3 From 8d61ffa5e01c5f676431d12caba17db164a48a86 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 10 May 2013 12:32:47 +0200 Subject: cfg80211/mac80211: use cfg80211 wdev mutex in mac80211 Using separate locks in cfg80211 and mac80211 has always caused issues, for example having to unlock in places in mac80211 to call cfg80211, which even needed a framework to make cfg80211 calls after some functions returned etc. Additionally, I suspect some issues people have reported with the cfg80211 state getting confused could be due to such issues, when cfg80211 is asking mac80211 to change state but mac80211 is in the process of telling cfg80211 that the state changed (in another way.) Signed-off-by: Johannes Berg --- Documentation/DocBook/80211.tmpl | 2 - include/net/cfg80211.h | 51 +++--- net/mac80211/cfg.c | 4 +- net/mac80211/debugfs_netdev.c | 10 +- net/mac80211/ht.c | 4 +- net/mac80211/ibss.c | 39 ++--- net/mac80211/ieee80211_i.h | 25 ++- net/mac80211/main.c | 4 +- net/mac80211/mesh.c | 32 ++-- net/mac80211/mesh_plink.c | 7 +- net/mac80211/mlme.c | 341 ++++++++++++++------------------------- net/mac80211/util.c | 4 +- net/wireless/mlme.c | 48 +----- net/wireless/trace.h | 4 +- 14 files changed, 220 insertions(+), 355 deletions(-) (limited to 'include') diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl index 0f6a3edcd44b..ebe89694cf81 100644 --- a/Documentation/DocBook/80211.tmpl +++ b/Documentation/DocBook/80211.tmpl @@ -132,9 +132,7 @@ !Finclude/net/cfg80211.h cfg80211_send_rx_assoc !Finclude/net/cfg80211.h cfg80211_send_assoc_timeout !Finclude/net/cfg80211.h cfg80211_send_deauth -!Finclude/net/cfg80211.h __cfg80211_send_deauth !Finclude/net/cfg80211.h cfg80211_send_disassoc -!Finclude/net/cfg80211.h __cfg80211_send_disassoc !Finclude/net/cfg80211.h cfg80211_ibss_joined !Finclude/net/cfg80211.h cfg80211_connect_result !Finclude/net/cfg80211.h cfg80211_roamed diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5430f70c63b3..9f45d74ce3c2 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1867,7 +1867,9 @@ struct cfg80211_update_ft_ies_params { * @get_mpath: get a mesh path for the given parameters * @dump_mpath: dump mesh path callback -- resume dump at index @idx * @join_mesh: join the mesh network with the specified parameters + * (invoked with the wireless_dev mutex held) * @leave_mesh: leave the current mesh network + * (invoked with the wireless_dev mutex held) * * @get_mesh_config: Get the current mesh configuration * @@ -1894,20 +1896,28 @@ struct cfg80211_update_ft_ies_params { * the scan/scan_done bracket too. * * @auth: Request to authenticate with the specified peer + * (invoked with the wireless_dev mutex held) * @assoc: Request to (re)associate with the specified peer + * (invoked with the wireless_dev mutex held) * @deauth: Request to deauthenticate from the specified peer + * (invoked with the wireless_dev mutex held) * @disassoc: Request to disassociate from the specified peer + * (invoked with the wireless_dev mutex held) * * @connect: Connect to the ESS with the specified parameters. When connected, * call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS. * If the connection fails for some reason, call cfg80211_connect_result() * with the status from the AP. 
+ * (invoked with the wireless_dev mutex held) * @disconnect: Disconnect from the BSS/ESS. + * (invoked with the wireless_dev mutex held) * * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call * cfg80211_ibss_joined(), also call that function when changing BSSID due * to a merge. + * (invoked with the wireless_dev mutex held) * @leave_ibss: Leave the IBSS. + * (invoked with the wireless_dev mutex held) * * @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or * MESH mode) @@ -2851,7 +2861,8 @@ struct cfg80211_cached_keys; * by cfg80211 on change_interface * @mgmt_registrations: list of registrations for management frames * @mgmt_registrations_lock: lock for the list - * @mtx: mutex used to lock data in this struct + * @mtx: mutex used to lock data in this struct, may be used by drivers + * and some API functions require it held * @cleanup_work: work struct used for cleanup that can't be done directly * @beacon_interval: beacon interval used on this device for transmitting * beacons, 0 when not valid @@ -3424,7 +3435,8 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); * This function is called whenever an authentication has been processed in * station mode. The driver is required to call either this function or * cfg80211_send_auth_timeout() to indicate the result of cfg80211_ops::auth() - * call. This function may sleep. + * call. This function may sleep. The caller must hold the corresponding wdev's + * mutex. */ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len); @@ -3433,7 +3445,8 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len); * @dev: network device * @addr: The MAC address of the device with which the authentication timed out * - * This function may sleep. + * This function may sleep. The caller must hold the corresponding wdev's + * mutex. */ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr); @@ -3448,7 +3461,8 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr); * This function is called whenever a (re)association response has been * processed in station mode. The driver is required to call either this * function or cfg80211_send_assoc_timeout() to indicate the result of - * cfg80211_ops::assoc() call. This function may sleep. + * cfg80211_ops::assoc() call. This function may sleep. The caller must hold + * the corresponding wdev's mutex. */ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, const u8 *buf, size_t len); @@ -3458,7 +3472,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, * @dev: network device * @addr: The MAC address of the device with which the association timed out * - * This function may sleep. + * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr); @@ -3470,20 +3484,11 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr); * * This function is called whenever deauthentication has been processed in * station mode. This includes both received deauthentication frames and - * locally generated ones. This function may sleep. + * locally generated ones. This function may sleep. The caller must hold the + * corresponding wdev's mutex. 
*/ void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len); -/** - * __cfg80211_send_deauth - notification of processed deauthentication - * @dev: network device - * @buf: deauthentication frame (header + body) - * @len: length of the frame data - * - * Like cfg80211_send_deauth(), but doesn't take the wdev lock. - */ -void __cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len); - /** * cfg80211_send_disassoc - notification of processed disassociation * @dev: network device @@ -3492,21 +3497,11 @@ void __cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len); * * This function is called whenever disassociation has been processed in * station mode. This includes both received disassociation frames and locally - * generated ones. This function may sleep. + * generated ones. This function may sleep. The caller must hold the + * corresponding wdev's mutex. */ void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len); -/** - * __cfg80211_send_disassoc - notification of processed disassociation - * @dev: network device - * @buf: disassociation response frame (header + body) - * @len: length of the frame data - * - * Like cfg80211_send_disassoc(), but doesn't take the wdev lock. - */ -void __cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, - size_t len); - /** * cfg80211_send_unprot_deauth - notification of unprotected deauthentication * @dev: network device diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index eb4219051043..232edf78d5a9 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2318,7 +2318,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode old_req; int err; - lockdep_assert_held(&sdata->u.mgd.mtx); + lockdep_assert_held(&sdata->wdev.mtx); old_req = sdata->u.mgd.req_smps; sdata->u.mgd.req_smps = smps_mode; @@ -2375,9 +2375,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, local->dynamic_ps_forced_timeout = timeout; /* no change, but if automatic follow powersave */ - mutex_lock(&sdata->u.mgd.mtx); __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps); - mutex_unlock(&sdata->u.mgd.mtx); if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index f83074fe6670..cafe614ef93d 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -228,9 +228,9 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); err = __ieee80211_request_smps(sdata, smps_mode); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); return err; } @@ -313,16 +313,16 @@ static ssize_t ieee80211_if_parse_tkip_mic_test( case NL80211_IFTYPE_STATION: fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); if (!sdata->u.mgd.associated) { - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); dev_kfree_skb(skb); return -ENOTCONN; } memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr->addr3, addr, ETH_ALEN); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); break; default: dev_kfree_skb(skb); diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index af8cee06e4f3..75dff338f581 100644 --- a/net/mac80211/ht.c +++ 
b/net/mac80211/ht.c @@ -429,9 +429,9 @@ void ieee80211_request_smps_work(struct work_struct *work) container_of(work, struct ieee80211_sub_if_data, u.mgd.request_smps_work); - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); __ieee80211_request_smps(sdata, sdata->u.mgd.driver_smps_mode); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); } void ieee80211_request_smps(struct ieee80211_vif *vif, diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 956ba6316da5..caa4b4f7f6e4 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -54,7 +54,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct beacon_data *presp; int frame_len; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); /* Reset own TSF to allow time synchronization work. */ drv_reset_tsf(local, sdata); @@ -74,7 +74,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, } presp = rcu_dereference_protected(ifibss->presp, - lockdep_is_held(&ifibss->mtx)); + lockdep_is_held(&sdata->wdev.mtx)); rcu_assign_pointer(ifibss->presp, NULL); if (presp) kfree_rcu(presp, rcu_head); @@ -263,7 +263,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, const struct cfg80211_bss_ies *ies; u64 tsf; - lockdep_assert_held(&sdata->u.ibss.mtx); + sdata_assert_lock(sdata); if (beacon_int < 10) beacon_int = 10; @@ -410,7 +410,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; u8 deauth_frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; - lockdep_assert_held(&sdata->u.ibss.mtx); + sdata_assert_lock(sdata); if (len < 24 + 6) return; @@ -677,7 +677,7 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) int active = 0; struct sta_info *sta; - lockdep_assert_held(&sdata->u.ibss.mtx); + sdata_assert_lock(sdata); rcu_read_lock(); @@ -703,7 +703,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); @@ -734,7 +734,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) u16 capability; int i; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); if (ifibss->fixed_bssid) { memcpy(bssid, ifibss->bssid, ETH_ALEN); @@ -777,7 +777,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) int active_ibss; u16 capability; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); active_ibss = ieee80211_sta_active_ibss(sdata); ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss); @@ -847,10 +847,10 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, struct beacon_data *presp; u8 *pos, *end; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); presp = rcu_dereference_protected(ifibss->presp, - lockdep_is_held(&ifibss->mtx)); + lockdep_is_held(&sdata->wdev.mtx)); if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || len < 24 + 2 || !presp) @@ -934,7 +934,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); - mutex_lock(&sdata->u.ibss.mtx); + sdata_lock(sdata); if (!sdata->u.ibss.ssid_len) goto mgmt_out; /* not ready to merge yet */ @@ -957,7 +957,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, } mgmt_out: - 
mutex_unlock(&sdata->u.ibss.mtx); + sdata_unlock(sdata); } void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) @@ -965,7 +965,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct sta_info *sta; - mutex_lock(&ifibss->mtx); + sdata_lock(sdata); /* * Work could be scheduled after scan or similar @@ -1001,7 +1001,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) } out: - mutex_unlock(&ifibss->mtx); + sdata_unlock(sdata); } static void ieee80211_ibss_timer(unsigned long data) @@ -1018,7 +1018,6 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) setup_timer(&ifibss->timer, ieee80211_ibss_timer, (unsigned long) sdata); - mutex_init(&ifibss->mtx); INIT_LIST_HEAD(&ifibss->incomplete_stations); spin_lock_init(&ifibss->incomplete_lock); } @@ -1045,8 +1044,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, { u32 changed = 0; - mutex_lock(&sdata->u.ibss.mtx); - if (params->bssid) { memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN); sdata->u.ibss.fixed_bssid = true; @@ -1079,8 +1076,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len); sdata->u.ibss.ssid_len = params->ssid_len; - mutex_unlock(&sdata->u.ibss.mtx); - /* * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is * reserved, but an HT STA shall protect HT transmissions as though @@ -1116,8 +1111,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) struct sta_info *sta; struct beacon_data *presp; - mutex_lock(&sdata->u.ibss.mtx); - active_ibss = ieee80211_sta_active_ibss(sdata); if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { @@ -1161,7 +1154,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) /* remove beacon */ kfree(sdata->u.ibss.ie); presp = rcu_dereference_protected(ifibss->presp, - lockdep_is_held(&sdata->u.ibss.mtx)); + lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); sdata->vif.bss_conf.ibss_joined = false; sdata->vif.bss_conf.ibss_creator = false; @@ -1177,7 +1170,5 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) del_timer_sync(&sdata->u.ibss.timer); - mutex_unlock(&sdata->u.ibss.mtx); - return 0; } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ba3cd284d104..9eed6f1d1614 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -394,7 +394,6 @@ struct ieee80211_if_managed { bool nullfunc_failed; bool connection_loss; - struct mutex mtx; struct cfg80211_bss *associated; struct ieee80211_mgd_auth_data *auth_data; struct ieee80211_mgd_assoc_data *assoc_data; @@ -488,8 +487,6 @@ struct ieee80211_if_managed { struct ieee80211_if_ibss { struct timer_list timer; - struct mutex mtx; - unsigned long last_scan_completed; u32 basic_rates; @@ -580,8 +577,6 @@ struct ieee80211_if_mesh { bool accepting_plinks; int num_gates; struct beacon_data __rcu *beacon; - /* just protects beacon updates for now */ - struct mutex mtx; const u8 *ie; u8 ie_len; enum { @@ -778,6 +773,26 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) return container_of(p, struct ieee80211_sub_if_data, vif); } +static inline void sdata_lock(struct ieee80211_sub_if_data *sdata) + __acquires(&sdata->wdev.mtx) +{ + mutex_lock(&sdata->wdev.mtx); + __acquire(&sdata->wdev.mtx); +} + +static inline void sdata_unlock(struct ieee80211_sub_if_data *sdata) + __releases(&sdata->wdev.mtx) +{ + mutex_unlock(&sdata->wdev.mtx); 
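/*
 * Note, not part of the patch: __acquires()/__releases() above and the
 * __acquire()/__release() calls in these helpers are sparse context
 * annotations only; they compile away and merely let sparse verify
 * that sdata_lock() and sdata_unlock() balance on &sdata->wdev.mtx.
 */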
+ __release(&sdata->wdev.mtx); +} + +static inline void +sdata_assert_lock(struct ieee80211_sub_if_data *sdata) +{ + lockdep_assert_held(&sdata->wdev.mtx); +} + static inline enum ieee80211_band ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata) { diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 8a7bfc47d577..1998f1475267 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -331,7 +331,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, return NOTIFY_DONE; ifmgd = &sdata->u.mgd; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); /* Copy the addresses to the bss_conf list */ ifa = idev->ifa_list; @@ -349,7 +349,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_ARP_FILTER); - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return NOTIFY_DONE; } diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index c14bb816c6a3..b3d1fdd46368 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -161,8 +161,11 @@ void mesh_sta_cleanup(struct sta_info *sta) del_timer_sync(&sta->plink_timer); } - if (changed) + if (changed) { + sdata_lock(sdata); ieee80211_mbss_info_change_notify(sdata, changed); + sdata_unlock(sdata); + } } int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) @@ -577,7 +580,9 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata) mesh_path_expire(sdata); changed = mesh_accept_plinks_update(sdata); + sdata_lock(sdata); ieee80211_mbss_info_change_notify(sdata, changed); + sdata_unlock(sdata); mod_timer(&ifmsh->housekeeping_timer, round_jiffies(jiffies + @@ -697,25 +702,21 @@ out_free: } static int -ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh) +ieee80211_mesh_rebuild_beacon(struct ieee80211_sub_if_data *sdata) { struct beacon_data *old_bcn; int ret; - mutex_lock(&ifmsh->mtx); - - old_bcn = rcu_dereference_protected(ifmsh->beacon, - lockdep_is_held(&ifmsh->mtx)); - ret = ieee80211_mesh_build_beacon(ifmsh); + old_bcn = rcu_dereference_protected(sdata->u.mesh.beacon, + lockdep_is_held(&sdata->wdev.mtx)); + ret = ieee80211_mesh_build_beacon(&sdata->u.mesh); if (ret) /* just reuse old beacon */ - goto out; + return ret; if (old_bcn) kfree_rcu(old_bcn, rcu_head); -out: - mutex_unlock(&ifmsh->mtx); - return ret; + return 0; } void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata, @@ -726,7 +727,7 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata, BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT))) - if (ieee80211_mesh_rebuild_beacon(&sdata->u.mesh)) + if (ieee80211_mesh_rebuild_beacon(sdata)) return; ieee80211_bss_info_change_notify(sdata, changed); } @@ -788,12 +789,12 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) sdata->vif.bss_conf.enable_beacon = false; clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); - mutex_lock(&ifmsh->mtx); + sdata_lock(sdata); bcn = rcu_dereference_protected(ifmsh->beacon, - lockdep_is_held(&ifmsh->mtx)); + lockdep_is_held(&sdata->wdev.mtx)); rcu_assign_pointer(ifmsh->beacon, NULL); kfree_rcu(bcn, rcu_head); - mutex_unlock(&ifmsh->mtx); + sdata_unlock(sdata); /* flush STAs and mpaths on this iface */ sta_info_flush(sdata); @@ -1041,7 +1042,6 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) spin_lock_init(&ifmsh->mesh_preq_queue_lock); spin_lock_init(&ifmsh->sync_offset_lock); RCU_INIT_POINTER(ifmsh->beacon, NULL); - 
mutex_init(&ifmsh->mtx); sdata->vif.bss_conf.bssid = zero_addr; } diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 09bebed99416..6c4da99bc4fb 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -517,7 +517,9 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, ieee80211_mps_frame_release(sta, elems); out: rcu_read_unlock(); + sdata_lock(sdata); ieee80211_mbss_info_change_notify(sdata, changed); + sdata_unlock(sdata); } static void mesh_plink_timer(unsigned long data) @@ -1068,6 +1070,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); - if (changed) + if (changed) { + sdata_lock(sdata); ieee80211_mbss_info_change_notify(sdata, changed); + sdata_unlock(sdata); + } } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 1da3d6be8e11..f44f4caa69ee 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -90,41 +90,6 @@ MODULE_PARM_DESC(probe_wait_ms, */ #define IEEE80211_SIGNAL_AVE_MIN_COUNT 4 -/* - * All cfg80211 functions have to be called outside a locked - * section so that they can acquire a lock themselves... This - * is much simpler than queuing up things in cfg80211, but we - * do need some indirection for that here. - */ -enum rx_mgmt_action { - /* no action required */ - RX_MGMT_NONE, - - /* caller must call cfg80211_send_deauth() */ - RX_MGMT_CFG80211_DEAUTH, - - /* caller must call cfg80211_send_disassoc() */ - RX_MGMT_CFG80211_DISASSOC, - - /* caller must call cfg80211_send_rx_auth() */ - RX_MGMT_CFG80211_RX_AUTH, - - /* caller must call cfg80211_send_rx_assoc() */ - RX_MGMT_CFG80211_RX_ASSOC, - - /* caller must call cfg80211_send_assoc_timeout() */ - RX_MGMT_CFG80211_ASSOC_TIMEOUT, - - /* used when a processed beacon causes a deauth */ - RX_MGMT_CFG80211_TX_DEAUTH, -}; - -/* utils */ -static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) -{ - lockdep_assert_held(&ifmgd->mtx); -} - /* * We can have multiple work items (and connection probing) * scheduling this timer, but we need to take care to only @@ -135,13 +100,14 @@ static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) * has happened -- the work that runs from this timer will * do that. 
*/ -static void run_again(struct ieee80211_if_managed *ifmgd, unsigned long timeout) +static void run_again(struct ieee80211_sub_if_data *sdata, + unsigned long timeout) { - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); - if (!timer_pending(&ifmgd->timer) || - time_before(timeout, ifmgd->timer.expires)) - mod_timer(&ifmgd->timer, timeout); + if (!timer_pending(&sdata->u.mgd.timer) || + time_before(timeout, sdata->u.mgd.timer.expires)) + mod_timer(&sdata->u.mgd.timer, timeout); } void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) @@ -652,7 +618,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) struct ieee80211_channel *chan; u32 rates = 0; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); @@ -962,7 +928,7 @@ static void ieee80211_chswitch_work(struct work_struct *work) if (!ieee80211_sdata_running(sdata)) return; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) goto out; @@ -985,7 +951,7 @@ static void ieee80211_chswitch_work(struct work_struct *work) IEEE80211_QUEUE_STOP_REASON_CSA); out: ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) @@ -1036,7 +1002,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, const struct ieee80211_ht_operation *ht_oper; int secondary_channel_offset = -1; - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); if (!cbss) return; @@ -1845,7 +1811,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; u32 changed = 0; - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); if (WARN_ON_ONCE(tx && !frame_buf)) return; @@ -2054,7 +2020,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) } ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); - run_again(ifmgd, ifmgd->probe_timeout); + run_again(sdata, ifmgd->probe_timeout); if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ieee80211_flush_queues(sdata->local, sdata); } @@ -2068,7 +2034,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, if (!ieee80211_sdata_running(sdata)) return; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) goto out; @@ -2122,7 +2088,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, ifmgd->probe_send_count = 0; ieee80211_mgd_probe_ap_send(sdata); out: - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, @@ -2138,7 +2104,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return NULL; - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); if (ifmgd->associated) cbss = ifmgd->associated; @@ -2171,9 +2137,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) { - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; } @@ -2184,13 +2150,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) ieee80211_wake_queues_by_reason(&sdata->local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA); - mutex_unlock(&ifmgd->mtx); - /* - * must be outside 
lock due to cfg80211, - * but that's not a problem. - */ cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); + sdata_unlock(sdata); } static void ieee80211_beacon_connection_loss_work(struct work_struct *work) @@ -2257,7 +2219,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata, { struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data; - lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata); if (!assoc) { sta_info_destroy_addr(sdata, auth_data->bss->bssid); @@ -2298,27 +2260,26 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, auth_data->key_idx, tx_flags); } -static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) +static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 bssid[ETH_ALEN]; u16 auth_alg, auth_transaction, status_code; struct sta_info *sta; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (len < 24 + 6) - return RX_MGMT_NONE; + return; if (!ifmgd->auth_data || ifmgd->auth_data->done) - return RX_MGMT_NONE; + return; memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN); if (!ether_addr_equal(bssid, mgmt->bssid)) - return RX_MGMT_NONE; + return; auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); @@ -2330,14 +2291,15 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, mgmt->sa, auth_alg, ifmgd->auth_data->algorithm, auth_transaction, ifmgd->auth_data->expected_transaction); - return RX_MGMT_NONE; + return; } if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied authentication (status %d)\n", mgmt->sa, status_code); ieee80211_destroy_auth_data(sdata, false); - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + return; } switch (ifmgd->auth_data->algorithm) { @@ -2350,20 +2312,20 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, if (ifmgd->auth_data->expected_transaction != 4) { ieee80211_auth_challenge(sdata, mgmt, len); /* need another frame */ - return RX_MGMT_NONE; + return; } break; default: WARN_ONCE(1, "invalid auth alg %d", ifmgd->auth_data->algorithm); - return RX_MGMT_NONE; + return; } sdata_info(sdata, "authenticated\n"); ifmgd->auth_data->done = true; ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && ifmgd->auth_data->expected_transaction != 2) { @@ -2371,7 +2333,8 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, * Report auth frame to user space for processing since another * round of Authentication frames is still needed. 
*/ - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + return; } /* move station state to auth */ @@ -2387,30 +2350,29 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, } mutex_unlock(&sdata->local->sta_mtx); - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + return; out_err: mutex_unlock(&sdata->local->sta_mtx); /* ignore frame -- wait for timeout */ - return RX_MGMT_NONE; } -static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) +static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const u8 *bssid = NULL; u16 reason_code; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (len < 24 + 2) - return RX_MGMT_NONE; + return; if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return; bssid = ifmgd->associated->bssid; @@ -2421,25 +2383,24 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, 0, 0, false, NULL); - return RX_MGMT_CFG80211_DEAUTH; + cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, len); } -static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) +static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 reason_code; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (len < 24 + 2) - return RX_MGMT_NONE; + return; if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return; reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); @@ -2448,7 +2409,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, 0, 0, false, NULL); - return RX_MGMT_CFG80211_DISASSOC; + cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, len); } static void ieee80211_get_rates(struct ieee80211_supported_band *sband, @@ -2498,7 +2459,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, { struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; - lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata); if (!assoc) { sta_info_destroy_addr(sdata, assoc_data->bss->bssid); @@ -2679,10 +2640,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, return true; } -static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len, - struct cfg80211_bss **bss) +static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; @@ -2690,13 +2650,14 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems elems; u8 *pos; bool reassoc; + struct cfg80211_bss *bss; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (!assoc_data) - return RX_MGMT_NONE; + return; if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid)) - return RX_MGMT_NONE; + return; /* * AssocResp and ReassocResp have identical structure, so process both @@ 
-2704,7 +2665,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, */ if (len < 24 + 6) - return RX_MGMT_NONE; + return; reassoc = ieee80211_is_reassoc_req(mgmt->frame_control); capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); @@ -2731,22 +2692,23 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, assoc_data->timeout = jiffies + msecs_to_jiffies(ms); assoc_data->timeout_started = true; if (ms > IEEE80211_ASSOC_TIMEOUT) - run_again(ifmgd, assoc_data->timeout); - return RX_MGMT_NONE; + run_again(sdata, assoc_data->timeout); + return; } - *bss = assoc_data->bss; + bss = assoc_data->bss; if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied association (code=%d)\n", mgmt->sa, status_code); ieee80211_destroy_assoc_data(sdata, false); } else { - if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { + if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) { /* oops -- internal error -- send timeout for now */ ieee80211_destroy_assoc_data(sdata, false); - cfg80211_put_bss(sdata->local->hw.wiphy, *bss); - return RX_MGMT_CFG80211_ASSOC_TIMEOUT; + cfg80211_put_bss(sdata->local->hw.wiphy, bss); + cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); + return; } sdata_info(sdata, "associated\n"); @@ -2758,7 +2720,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, ieee80211_destroy_assoc_data(sdata, true); } - return RX_MGMT_CFG80211_RX_ASSOC; + cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, len); } static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, @@ -2772,7 +2734,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel; bool need_ps = false; - lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata); if ((sdata->u.mgd.associated && ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) || @@ -2831,7 +2793,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, ifmgd = &sdata->u.mgd; - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) return; /* ignore ProbeResp to foreign address */ @@ -2856,7 +2818,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, ifmgd->auth_data->tries = 0; ifmgd->auth_data->timeout = jiffies; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); } } @@ -2881,10 +2843,9 @@ static const u64 care_about_ies = (1ULL << WLAN_EID_HT_CAPABILITY) | (1ULL << WLAN_EID_HT_OPERATION); -static enum rx_mgmt_action -ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len, - u8 *deauth_buf, struct ieee80211_rx_status *rx_status) +static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; @@ -2899,24 +2860,25 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, u8 erp_value = 0; u32 ncrc; u8 *bssid; + u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); /* Process beacon from the current BSS */ baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; if (baselen > len) - return RX_MGMT_NONE; + return; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); 
- return RX_MGMT_NONE; + return; } if (rx_status->freq != chanctx_conf->def.chan->center_freq) { rcu_read_unlock(); - return RX_MGMT_NONE; + return; } chan = chanctx_conf->def.chan; rcu_read_unlock(); @@ -2943,13 +2905,13 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, /* continue assoc process */ ifmgd->assoc_data->timeout = jiffies; ifmgd->assoc_data->timeout_started = true; - run_again(ifmgd, ifmgd->assoc_data->timeout); - return RX_MGMT_NONE; + run_again(sdata, ifmgd->assoc_data->timeout); + return; } if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return; bssid = ifmgd->associated->bssid; /* Track average RSSI from the Beacon frames of the current AP */ @@ -3095,7 +3057,7 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, } if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) - return RX_MGMT_NONE; + return; ifmgd->beacon_crc = ncrc; ifmgd->beacon_crc_valid = true; @@ -3151,7 +3113,9 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, true, deauth_buf); - return RX_MGMT_CFG80211_TX_DEAUTH; + cfg80211_send_deauth(sdata->dev, deauth_buf, + sizeof(deauth_buf)); + return; } if (sta && elems.opmode_notif) @@ -3168,19 +3132,13 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, elems.pwr_constr_elem); ieee80211_bss_info_change_notify(sdata, changed); - - return RX_MGMT_NONE; } void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; - struct cfg80211_bss *bss = NULL; - enum rx_mgmt_action rma = RX_MGMT_NONE; - u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; u16 fc; struct ieee802_11_elems elems; int ies_len; @@ -3189,28 +3147,27 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_BEACON: - rma = ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, - deauth_buf, rx_status); + ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_PROBE_RESP: ieee80211_rx_mgmt_probe_resp(sdata, skb); break; case IEEE80211_STYPE_AUTH: - rma = ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DEAUTH: - rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DISASSOC: - rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ASSOC_RESP: case IEEE80211_STYPE_REASSOC_RESP: - rma = ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, &bss); + ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { @@ -3256,34 +3213,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, } break; } - mutex_unlock(&ifmgd->mtx); - - switch (rma) { - case RX_MGMT_NONE: - /* no action */ - break; - case RX_MGMT_CFG80211_DEAUTH: - cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_DISASSOC: - cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); - break; - case 
RX_MGMT_CFG80211_RX_AUTH: - cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_RX_ASSOC: - cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_ASSOC_TIMEOUT: - cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); - break; - case RX_MGMT_CFG80211_TX_DEAUTH: - cfg80211_send_deauth(sdata->dev, deauth_buf, - sizeof(deauth_buf)); - break; - default: - WARN(1, "unexpected: %d", rma); - } + sdata_unlock(sdata); } static void ieee80211_sta_timer(unsigned long data) @@ -3297,20 +3227,12 @@ static void ieee80211_sta_timer(unsigned long data) static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, u8 *bssid, u8 reason, bool tx) { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, tx, frame_buf); - mutex_unlock(&ifmgd->mtx); - /* - * must be outside lock due to cfg80211, - * but that's not a problem. - */ cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); - - mutex_lock(&ifmgd->mtx); } static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) @@ -3320,7 +3242,7 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; u32 tx_flags = 0; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (WARN_ON_ONCE(!auth_data)) return -EINVAL; @@ -3393,7 +3315,7 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) if (tx_flags == 0) { auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, auth_data->timeout); + run_again(sdata, auth_data->timeout); } else { auth_data->timeout_started = false; } @@ -3406,7 +3328,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; struct ieee80211_local *local = sdata->local; - lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata); assoc_data->tries++; if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) { @@ -3430,7 +3352,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; assoc_data->timeout_started = true; - run_again(&sdata->u.mgd, assoc_data->timeout); + run_again(sdata, assoc_data->timeout); } else { assoc_data->timeout_started = false; } @@ -3455,7 +3377,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (ifmgd->status_received) { __le16 fc = ifmgd->status_fc; @@ -3467,7 +3389,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) if (status_acked) { ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); } else { ifmgd->auth_data->timeout = jiffies - 1; } @@ -3478,7 +3400,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) if (status_acked) { ifmgd->assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT; - run_again(ifmgd, ifmgd->assoc_data->timeout); + run_again(sdata, ifmgd->assoc_data->timeout); } else { ifmgd->assoc_data->timeout = jiffies - 1; } @@ -3501,12 +3423,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) 
ieee80211_destroy_auth_data(sdata, false); - mutex_unlock(&ifmgd->mtx); cfg80211_send_auth_timeout(sdata->dev, bssid); - mutex_lock(&ifmgd->mtx); } } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && time_after(jiffies, ifmgd->assoc_data->timeout)) { @@ -3519,12 +3439,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) ieee80211_destroy_assoc_data(sdata, false); - mutex_unlock(&ifmgd->mtx); cfg80211_send_assoc_timeout(sdata->dev, bssid); - mutex_lock(&ifmgd->mtx); } } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) - run_again(ifmgd, ifmgd->assoc_data->timeout); + run_again(sdata, ifmgd->assoc_data->timeout); if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | IEEE80211_STA_CONNECTION_POLL) && @@ -3558,7 +3476,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) false); } } else if (time_is_after_jiffies(ifmgd->probe_timeout)) - run_again(ifmgd, ifmgd->probe_timeout); + run_again(sdata, ifmgd->probe_timeout); else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) { mlme_dbg(sdata, "Failed to send nullfunc to AP %pM after %dms, disconnecting\n", @@ -3587,7 +3505,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) } } - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } static void ieee80211_sta_bcn_mon_timer(unsigned long data) @@ -3648,9 +3566,9 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) { - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; } @@ -3661,10 +3579,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) ifmgd->associated->bssid, WLAN_REASON_UNSPECIFIED, true); - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; } - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } #endif @@ -3696,8 +3614,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len; ifmgd->p2p_noa_index = -1; - mutex_init(&ifmgd->mtx); - if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC; else @@ -4053,8 +3969,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, /* try to authenticate/probe */ - mutex_lock(&ifmgd->mtx); - if ((ifmgd->auth_data && !ifmgd->auth_data->done) || ifmgd->assoc_data) { err = -EBUSY; @@ -4074,8 +3988,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, WLAN_REASON_UNSPECIFIED, false, frame_buf); - __cfg80211_send_deauth(sdata->dev, frame_buf, - sizeof(frame_buf)); + cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); } sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@ -4092,8 +4006,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, /* hold our own reference */ cfg80211_ref_bss(local->hw.wiphy, auth_data->bss); - err = 0; - goto out_unlock; + return 0; err_clear: memset(ifmgd->bssid, 0, ETH_ALEN); @@ -4101,9 +4014,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, ifmgd->auth_data = NULL; err_free: kfree(auth_data); - out_unlock: - mutex_unlock(&ifmgd->mtx); - return err; } @@ -4134,8 +4044,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, assoc_data->ssid_len = ssidie[1]; rcu_read_unlock(); - mutex_lock(&ifmgd->mtx); - if (ifmgd->associated) { u8 
frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; @@ -4143,8 +4051,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, WLAN_REASON_UNSPECIFIED, false, frame_buf); - __cfg80211_send_deauth(sdata->dev, frame_buf, - sizeof(frame_buf)); + cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); } if (ifmgd->auth_data && !ifmgd->auth_data->done) { @@ -4338,7 +4246,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, } rcu_read_unlock(); - run_again(ifmgd, assoc_data->timeout); + run_again(sdata, assoc_data->timeout); if (bss->corrupt_data) { char *corrupt_type = "data"; @@ -4354,17 +4262,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, corrupt_type); } - err = 0; - goto out; + return 0; err_clear: memset(ifmgd->bssid, 0, ETH_ALEN); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->assoc_data = NULL; err_free: kfree(assoc_data); - out: - mutex_unlock(&ifmgd->mtx); - return err; } @@ -4376,8 +4280,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, bool tx = !req->local_state_change; bool report_frame = false; - mutex_lock(&ifmgd->mtx); - sdata_info(sdata, "deauthenticating from %pM by local choice (reason=%d)\n", req->bssid, req->reason_code); @@ -4389,7 +4291,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, req->reason_code, tx, frame_buf); ieee80211_destroy_auth_data(sdata, false); - mutex_unlock(&ifmgd->mtx); report_frame = true; goto out; @@ -4401,12 +4302,11 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, req->reason_code, tx, frame_buf); report_frame = true; } - mutex_unlock(&ifmgd->mtx); out: if (report_frame) - __cfg80211_send_deauth(sdata->dev, frame_buf, - IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_send_deauth(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); return 0; } @@ -4418,18 +4318,14 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, u8 bssid[ETH_ALEN]; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; - mutex_lock(&ifmgd->mtx); - /* * cfg80211 should catch this ... but it's racy since * we can receive a disassoc frame, process it, hand it * to cfg80211 while that's in a locked section already * trying to tell us that the user wants to disconnect. 
*/ - if (ifmgd->associated != req->bss) { - mutex_unlock(&ifmgd->mtx); + if (ifmgd->associated != req->bss) return -ENOLINK; - } sdata_info(sdata, "disassociating from %pM by local choice (reason=%d)\n", @@ -4439,10 +4335,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC, req->reason_code, !req->local_state_change, frame_buf); - mutex_unlock(&ifmgd->mtx); - __cfg80211_send_disassoc(sdata->dev, frame_buf, - IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_send_disassoc(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); return 0; } @@ -4462,13 +4357,13 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) cancel_work_sync(&ifmgd->csa_connection_drop_work); cancel_work_sync(&ifmgd->chswitch_work); - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, false); if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); del_timer_sync(&ifmgd->timer); - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, diff --git a/net/mac80211/util.c b/net/mac80211/util.c index ffdfe4bc89ad..2a8d759324c2 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1581,9 +1581,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (sdata->u.mgd.dtim_period) changed |= BSS_CHANGED_DTIM_PERIOD; - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); ieee80211_bss_info_change_notify(sdata, changed); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 68b40f21bc38..80ffb0138919 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -25,12 +25,9 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); trace_cfg80211_send_rx_auth(dev); - wdev_lock(wdev); nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); cfg80211_sme_rx_auth(dev, buf, len); - - wdev_unlock(wdev); } EXPORT_SYMBOL(cfg80211_send_rx_auth); @@ -46,7 +43,6 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); trace_cfg80211_send_rx_assoc(dev, bss); - wdev_lock(wdev); status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); @@ -59,7 +55,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && cfg80211_sme_failed_reassoc(wdev)) { cfg80211_put_bss(wiphy, bss); - goto out; + return; } nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); @@ -71,7 +67,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, * sme will schedule work that does it later. 
*/ cfg80211_put_bss(wiphy, bss); - goto out; + return; } if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) { @@ -87,13 +83,11 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, status_code, status_code == WLAN_STATUS_SUCCESS, bss); - out: - wdev_unlock(wdev); } EXPORT_SYMBOL(cfg80211_send_rx_assoc); -void __cfg80211_send_deauth(struct net_device *dev, - const u8 *buf, size_t len) +void cfg80211_send_deauth(struct net_device *dev, + const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; @@ -102,7 +96,7 @@ void __cfg80211_send_deauth(struct net_device *dev, const u8 *bssid = mgmt->bssid; bool was_current = false; - trace___cfg80211_send_deauth(dev); + trace_cfg80211_send_deauth(dev); ASSERT_WDEV_LOCK(wdev); if (wdev->current_bss && @@ -129,20 +123,10 @@ void __cfg80211_send_deauth(struct net_device *dev, false, NULL); } } -EXPORT_SYMBOL(__cfg80211_send_deauth); - -void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - - wdev_lock(wdev); - __cfg80211_send_deauth(dev, buf, len); - wdev_unlock(wdev); -} EXPORT_SYMBOL(cfg80211_send_deauth); -void __cfg80211_send_disassoc(struct net_device *dev, - const u8 *buf, size_t len) +void cfg80211_send_disassoc(struct net_device *dev, + const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; @@ -152,7 +136,7 @@ void __cfg80211_send_disassoc(struct net_device *dev, u16 reason_code; bool from_ap; - trace___cfg80211_send_disassoc(dev); + trace_cfg80211_send_disassoc(dev); ASSERT_WDEV_LOCK(wdev); nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL); @@ -175,16 +159,6 @@ void __cfg80211_send_disassoc(struct net_device *dev, from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); } -EXPORT_SYMBOL(__cfg80211_send_disassoc); - -void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - - wdev_lock(wdev); - __cfg80211_send_disassoc(dev, buf, len); - wdev_unlock(wdev); -} EXPORT_SYMBOL(cfg80211_send_disassoc); void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) @@ -194,15 +168,12 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); trace_cfg80211_send_auth_timeout(dev, addr); - wdev_lock(wdev); nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); if (wdev->sme_state == CFG80211_SME_CONNECTING) __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); - - wdev_unlock(wdev); } EXPORT_SYMBOL(cfg80211_send_auth_timeout); @@ -213,15 +184,12 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); trace_cfg80211_send_assoc_timeout(dev, addr); - wdev_lock(wdev); nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); if (wdev->sme_state == CFG80211_SME_CONNECTING) __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); - - wdev_unlock(wdev); } EXPORT_SYMBOL(cfg80211_send_assoc_timeout); diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 5755bc14abbd..23fafeae8a10 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1911,12 +1911,12 @@ 
TRACE_EVENT(cfg80211_send_rx_assoc, NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG) ); -DEFINE_EVENT(netdev_evt_only, __cfg80211_send_deauth, +DEFINE_EVENT(netdev_evt_only, cfg80211_send_deauth, TP_PROTO(struct net_device *netdev), TP_ARGS(netdev) ); -DEFINE_EVENT(netdev_evt_only, __cfg80211_send_disassoc, +DEFINE_EVENT(netdev_evt_only, cfg80211_send_disassoc, TP_PROTO(struct net_device *netdev), TP_ARGS(netdev) ); -- cgit v1.2.3 From 079956742452494326081349a66942654498cafa Mon Sep 17 00:00:00 2001 From: Zhang Yanfei Date: Mon, 29 Apr 2013 11:55:10 -0700 Subject: ipvs: change type of netns_ipvs->sysctl_sync_qlen_max This member of struct netns_ipvs is calculated from nr_free_buffer_pages so change its type to unsigned long in case of overflow. Also, type of its related proc var sync_qlen_max and the return type of function sysctl_sync_qlen_max() should be changed to unsigned long, too. Besides, the type of ipvs_master_sync_state->sync_queue_len should be changed to unsigned long accordingly. Signed-off-by: Zhang Yanfei Cc: Julian Anastasov Cc: David Miller Signed-off-by: Andrew Morton Signed-off-by: Simon Horman --- include/net/ip_vs.h | 8 ++++---- net/netfilter/ipvs/ip_vs_ctl.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 4c062ccff9aa..4405886980c7 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -905,7 +905,7 @@ struct ip_vs_app { struct ipvs_master_sync_state { struct list_head sync_queue; struct ip_vs_sync_buff *sync_buff; - int sync_queue_len; + unsigned long sync_queue_len; unsigned int sync_queue_delay; struct task_struct *master_thread; struct delayed_work master_wakeup_work; @@ -998,7 +998,7 @@ struct netns_ipvs { int sysctl_snat_reroute; int sysctl_sync_ver; int sysctl_sync_ports; - int sysctl_sync_qlen_max; + unsigned long sysctl_sync_qlen_max; int sysctl_sync_sock_size; int sysctl_cache_bypass; int sysctl_expire_nodest_conn; @@ -1085,7 +1085,7 @@ static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) return ACCESS_ONCE(ipvs->sysctl_sync_ports); } -static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs) +static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs) { return ipvs->sysctl_sync_qlen_max; } @@ -1138,7 +1138,7 @@ static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) return 1; } -static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs) +static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs) { return IPVS_SYNC_QLEN_MAX; } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 5b142fb16480..70146496e73a 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1716,9 +1716,9 @@ static struct ctl_table vs_vars[] = { }, { .procname = "sync_qlen_max", - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "sync_sock_size", -- cgit v1.2.3 From 6d0bfe22611602f36617bc7aa2ffa1bbb2f54c67 Mon Sep 17 00:00:00 2001 From: Lorenzo Colitti Date: Wed, 22 May 2013 20:17:31 +0000 Subject: net: ipv6: Add IPv6 support to the ping socket. This adds the ability to send ICMPv6 echo requests without a raw socket. The equivalent ability for ICMPv4 was added in 2011. 
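For illustration only (not part of this patch): a userspace caller can exercise the new socket type roughly as sketched below. This is a minimal example, assuming the caller's GID falls within /proc/sys/net/ipv4/ping_group_range and with error handling omitted; the helper name ping6_once is made up for this sketch.

	#include <unistd.h>
	#include <sys/socket.h>
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <netinet/icmp6.h>

	static int ping6_once(const struct in6_addr *dst)
	{
		struct sockaddr_in6 sa = { .sin6_family = AF_INET6, .sin6_addr = *dst };
		struct icmp6_hdr req = { .icmp6_type = ICMP6_ECHO_REQUEST };
		char reply[1500];
		int fd;

		/* SOCK_DGRAM + IPPROTO_ICMPV6 selects the unprivileged ping socket. */
		fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6);
		if (fd < 0)
			return -1;

		/* The kernel chooses the echo identifier and fills in the checksum. */
		req.icmp6_seq = htons(1);
		sendto(fd, &req, sizeof(req), 0, (struct sockaddr *)&sa, sizeof(sa));
		recv(fd, reply, sizeof(reply), 0);
		close(fd);
		return 0;
	}

No raw-socket privileges (CAP_NET_RAW) are needed for this path.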
Instead of having separate code paths for IPv4 and IPv6, make most of the code in net/ipv4/ping.c dual-stack and only add a few IPv6-specific bits (like the protocol definition) to a new net/ipv6/ping.c. Hopefully this will reduce divergence and/or duplication of bugs in the future. Caveats: - Setting options via ancillary data (e.g., using IPV6_PKTINFO to specify the outgoing interface) is not yet supported. - There are no separate security settings for IPv4 and IPv6; everything is controlled by /proc/net/ipv4/ping_group_range. - The proc interface does not yet display IPv6 ping sockets properly. Tested with a patched copy of ping6 and using raw socket calls. Compiles and works with all of CONFIG_IPV6={n,m,y}. Signed-off-by: Lorenzo Colitti Signed-off-by: David S. Miller --- include/net/ipv6.h | 6 + include/net/ping.h | 49 ++++- include/net/transp_v6.h | 3 + net/ipv4/icmp.c | 5 +- net/ipv4/ping.c | 557 ++++++++++++++++++++++++++++++++++-------------- net/ipv6/Makefile | 2 +- net/ipv6/af_inet6.c | 12 ++ net/ipv6/icmp.c | 19 +- net/ipv6/ping.c | 216 +++++++++++++++++++ 9 files changed, 698 insertions(+), 171 deletions(-) create mode 100644 net/ipv6/ping.c (limited to 'include') diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 0810aa57c780..ab47582f6c0b 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -260,6 +260,12 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl) extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info); +int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, + struct icmp6hdr *thdr, int len); + +struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb, + struct sock *sk, struct flowi6 *fl6); + extern int ip6_ra_control(struct sock *sk, int sel); extern int ipv6_parse_hopopts(struct sk_buff *skb); diff --git a/include/net/ping.h b/include/net/ping.h index 682b5ae9af51..9242fa090d3d 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -13,6 +13,7 @@ #ifndef _PING_H #define _PING_H +#include #include /* PING_HTABLE_SIZE must be power of 2 */ @@ -28,6 +29,18 @@ */ #define GID_T_MAX (((gid_t)~0U) >> 1) +/* Compatibility glue so we can support IPv6 when it's compiled as a module */ +struct pingv6_ops { + int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len); + int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg, + struct sk_buff *skb); + int (*icmpv6_err_convert)(u8 type, u8 code, int *err); + void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err, + __be16 port, u32 info, u8 *payload); + int (*ipv6_chk_addr)(struct net *net, const struct in6_addr *addr, + struct net_device *dev, int strict); +}; + struct ping_table { struct hlist_nulls_head hash[PING_HTABLE_SIZE]; rwlock_t lock; @@ -39,10 +52,39 @@ struct ping_iter_state { }; extern struct proto ping_prot; +extern struct ping_table ping_table; +#if IS_ENABLED(CONFIG_IPV6) +extern struct pingv6_ops pingv6_ops; +#endif +struct pingfakehdr { + struct icmphdr icmph; + struct iovec *iov; + sa_family_t family; + __wsum wcheck; +}; -extern void ping_rcv(struct sk_buff *); -extern void ping_err(struct sk_buff *, u32 info); +int ping_get_port(struct sock *sk, unsigned short ident); +void ping_hash(struct sock *sk); +void ping_unhash(struct sock *sk); + +int ping_init_sock(struct sock *sk); +void ping_close(struct sock *sk, long timeout); +int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len); +void ping_err(struct sk_buff *skb, int offset, u32 info); +int ping_getfrag(void *from, char 
*to, int offset, int fraglen, int odd, + struct sk_buff *); + +int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len, int noblock, int flags, int *addr_len); +int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, + void *user_icmph, size_t icmph_len); +int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len); +int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len); +int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); +void ping_rcv(struct sk_buff *skb); #ifdef CONFIG_PROC_FS extern int __init ping_proc_init(void); @@ -50,6 +92,7 @@ extern void ping_proc_exit(void); #endif void __init ping_init(void); - +int __init pingv6_init(void); +void pingv6_exit(void); #endif /* _PING_H */ diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 938b7fd11204..eb40e71ff2ee 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -11,6 +11,7 @@ extern struct proto rawv6_prot; extern struct proto udpv6_prot; extern struct proto udplitev6_prot; extern struct proto tcpv6_prot; +extern struct proto pingv6_prot; struct flowi6; @@ -21,6 +22,8 @@ extern int ipv6_frag_init(void); extern void ipv6_frag_exit(void); /* transport protocols */ +extern int pingv6_init(void); +extern void pingv6_exit(void); extern int rawv6_init(void); extern void rawv6_exit(void); extern int udpv6_init(void); diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 76e10b47e053..562efd91f457 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -939,7 +939,8 @@ error: void icmp_err(struct sk_buff *skb, u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; - struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2)); + int offset = iph->ihl<<2; + struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset); int type = icmp_hdr(skb)->type; int code = icmp_hdr(skb)->code; struct net *net = dev_net(skb->dev); @@ -949,7 +950,7 @@ void icmp_err(struct sk_buff *skb, u32 info) * triggered by ICMP_ECHOREPLY which sent from kernel. */ if (icmph->type != ICMP_ECHOREPLY) { - ping_err(skb, info); + ping_err(skb, offset, info); return; } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 7d93d62cd5fd..71f6ad02fa67 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include @@ -46,8 +45,18 @@ #include #include +#if IS_ENABLED(CONFIG_IPV6) +#include +#include +#include +#include +#include +#endif -static struct ping_table ping_table; + +struct ping_table ping_table; +struct pingv6_ops pingv6_ops; +EXPORT_SYMBOL_GPL(pingv6_ops); static u16 ping_port_rover; @@ -58,6 +67,7 @@ static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int ma pr_debug("hash(%d) = %d\n", num, res); return res; } +EXPORT_SYMBOL_GPL(ping_hash); static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, struct net *net, unsigned int num) @@ -65,7 +75,7 @@ static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; } -static int ping_v4_get_port(struct sock *sk, unsigned short ident) +int ping_get_port(struct sock *sk, unsigned short ident) { struct hlist_nulls_node *node; struct hlist_nulls_head *hlist; @@ -103,6 +113,10 @@ next_port: ping_portaddr_for_each_entry(sk2, node, hlist) { isk2 = inet_sk(sk2); + /* BUG? Why is this reuse and not reuseaddr? 
ping.c + * doesn't turn off SO_REUSEADDR, and it doesn't expect + * that other ping processes can steal its packets. + */ if ((isk2->inet_num == ident) && (sk2 != sk) && (!sk2->sk_reuse || !sk->sk_reuse)) @@ -125,17 +139,18 @@ fail: write_unlock_bh(&ping_table.lock); return 1; } +EXPORT_SYMBOL_GPL(ping_get_port); -static void ping_v4_hash(struct sock *sk) +void ping_hash(struct sock *sk) { - pr_debug("ping_v4_hash(sk->port=%u)\n", inet_sk(sk)->inet_num); + pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num); BUG(); /* "Please do not press this button again." */ } -static void ping_v4_unhash(struct sock *sk) +void ping_unhash(struct sock *sk) { struct inet_sock *isk = inet_sk(sk); - pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); + pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); if (sk_hashed(sk)) { write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); @@ -146,31 +161,61 @@ static void ping_v4_unhash(struct sock *sk) write_unlock_bh(&ping_table.lock); } } +EXPORT_SYMBOL_GPL(ping_unhash); -static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr, - u16 ident, int dif) +static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) { struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident); struct sock *sk = NULL; struct inet_sock *isk; struct hlist_nulls_node *hnode; + int dif = skb->dev->ifindex; + + if (skb->protocol == htons(ETH_P_IP)) { + pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", + (int)ident, &ip_hdr(skb)->daddr, dif); +#if IS_ENABLED(CONFIG_IPV6) + } else if (skb->protocol == htons(ETH_P_IPV6)) { + pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n", + (int)ident, &ipv6_hdr(skb)->daddr, dif); +#endif + } - pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", - (int)ident, &daddr, dif); read_lock_bh(&ping_table.lock); ping_portaddr_for_each_entry(sk, hnode, hslot) { isk = inet_sk(sk); - pr_debug("found: %p: num = %d, daddr = %pI4, dif = %d\n", sk, - (int)isk->inet_num, &isk->inet_rcv_saddr, - sk->sk_bound_dev_if); - pr_debug("iterate\n"); if (isk->inet_num != ident) continue; - if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != daddr) - continue; + + if (skb->protocol == htons(ETH_P_IP) && + sk->sk_family == AF_INET) { + pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk, + (int) isk->inet_num, &isk->inet_rcv_saddr, + sk->sk_bound_dev_if); + + if (isk->inet_rcv_saddr && + isk->inet_rcv_saddr != ip_hdr(skb)->daddr) + continue; +#if IS_ENABLED(CONFIG_IPV6) + } else if (skb->protocol == htons(ETH_P_IPV6) && + sk->sk_family == AF_INET6) { + struct ipv6_pinfo *np = inet6_sk(sk); + + pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk, + (int) isk->inet_num, + &inet6_sk(sk)->rcv_saddr, + sk->sk_bound_dev_if); + + if (!ipv6_addr_any(&np->rcv_saddr) && + !ipv6_addr_equal(&np->rcv_saddr, + &ipv6_hdr(skb)->daddr)) + continue; +#endif + } + if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) continue; @@ -200,7 +245,7 @@ static void inet_get_ping_group_range_net(struct net *net, kgid_t *low, } -static int ping_init_sock(struct sock *sk) +int ping_init_sock(struct sock *sk) { struct net *net = sock_net(sk); kgid_t group = current_egid(); @@ -225,8 +270,9 @@ static int ping_init_sock(struct sock *sk) return -EACCES; } +EXPORT_SYMBOL_GPL(ping_init_sock); -static void ping_close(struct sock *sk, long timeout) +void ping_close(struct sock *sk, long timeout) { pr_debug("ping_close(sk=%p,sk->num=%u)\n", inet_sk(sk), inet_sk(sk)->inet_num); @@ 
-234,36 +280,122 @@ static void ping_close(struct sock *sk, long timeout) sk_common_release(sk); } +EXPORT_SYMBOL_GPL(ping_close); + +/* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */ +int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, + struct sockaddr *uaddr, int addr_len) { + struct net *net = sock_net(sk); + if (sk->sk_family == AF_INET) { + struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; + int chk_addr_ret; + + if (addr_len < sizeof(*addr)) + return -EINVAL; + + pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", + sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); + + chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); + + if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) + chk_addr_ret = RTN_LOCAL; + + if ((sysctl_ip_nonlocal_bind == 0 && + isk->freebind == 0 && isk->transparent == 0 && + chk_addr_ret != RTN_LOCAL) || + chk_addr_ret == RTN_MULTICAST || + chk_addr_ret == RTN_BROADCAST) + return -EADDRNOTAVAIL; + +#if IS_ENABLED(CONFIG_IPV6) + } else if (sk->sk_family == AF_INET6) { + struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr; + int addr_type, scoped, has_addr; + struct net_device *dev = NULL; + + if (addr_len < sizeof(*addr)) + return -EINVAL; + + pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", + sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); + + addr_type = ipv6_addr_type(&addr->sin6_addr); + scoped = __ipv6_addr_needs_scope_id(addr_type); + if ((addr_type != IPV6_ADDR_ANY && + !(addr_type & IPV6_ADDR_UNICAST)) || + (scoped && !addr->sin6_scope_id)) + return -EINVAL; + + rcu_read_lock(); + if (addr->sin6_scope_id) { + dev = dev_get_by_index_rcu(net, addr->sin6_scope_id); + if (!dev) { + rcu_read_unlock(); + return -ENODEV; + } + } + has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev, + scoped); + rcu_read_unlock(); + + if (!(isk->freebind || isk->transparent || has_addr || + addr_type == IPV6_ADDR_ANY)) + return -EADDRNOTAVAIL; + + if (scoped) + sk->sk_bound_dev_if = addr->sin6_scope_id; +#endif + } else { + return -EAFNOSUPPORT; + } + return 0; +} +void ping_set_saddr(struct sock *sk, struct sockaddr *saddr) +{ + if (saddr->sa_family == AF_INET) { + struct inet_sock *isk = inet_sk(sk); + struct sockaddr_in *addr = (struct sockaddr_in *) saddr; + isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr; +#if IS_ENABLED(CONFIG_IPV6) + } else if (saddr->sa_family == AF_INET6) { + struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr; + struct ipv6_pinfo *np = inet6_sk(sk); + np->rcv_saddr = np->saddr = addr->sin6_addr; +#endif + } +} + +void ping_clear_saddr(struct sock *sk, int dif) +{ + sk->sk_bound_dev_if = dif; + if (sk->sk_family == AF_INET) { + struct inet_sock *isk = inet_sk(sk); + isk->inet_rcv_saddr = isk->inet_saddr = 0; +#if IS_ENABLED(CONFIG_IPV6) + } else if (sk->sk_family == AF_INET6) { + struct ipv6_pinfo *np = inet6_sk(sk); + memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr)); + memset(&np->saddr, 0, sizeof(np->saddr)); +#endif + } +} /* * We need our own bind because there are no privileged id's == local ports. * Moreover, we don't allow binding to multi- and broadcast addresses. 
*/ -static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { - struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; struct inet_sock *isk = inet_sk(sk); unsigned short snum; - int chk_addr_ret; int err; + int dif = sk->sk_bound_dev_if; - if (addr_len < sizeof(struct sockaddr_in)) - return -EINVAL; - - pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n", - sk, addr->sin_addr.s_addr, ntohs(addr->sin_port)); - - chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); - if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) - chk_addr_ret = RTN_LOCAL; - - if ((sysctl_ip_nonlocal_bind == 0 && - isk->freebind == 0 && isk->transparent == 0 && - chk_addr_ret != RTN_LOCAL) || - chk_addr_ret == RTN_MULTICAST || - chk_addr_ret == RTN_BROADCAST) - return -EADDRNOTAVAIL; + err = ping_check_bind_addr(sk, isk, uaddr, addr_len); + if (err) + return err; lock_sock(sk); @@ -272,42 +404,50 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) goto out; err = -EADDRINUSE; - isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr; - snum = ntohs(addr->sin_port); - if (ping_v4_get_port(sk, snum) != 0) { - isk->inet_saddr = isk->inet_rcv_saddr = 0; + ping_set_saddr(sk, uaddr); + snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port); + if (ping_get_port(sk, snum) != 0) { + ping_clear_saddr(sk, dif); goto out; } - pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n", + pr_debug("after bind(): num = %d, dif = %d\n", (int)isk->inet_num, - &isk->inet_rcv_saddr, (int)sk->sk_bound_dev_if); err = 0; - if (isk->inet_rcv_saddr) + if ((sk->sk_family == AF_INET && isk->inet_rcv_saddr) || + (sk->sk_family == AF_INET6 && + !ipv6_addr_any(&inet6_sk(sk)->rcv_saddr))) sk->sk_userlocks |= SOCK_BINDADDR_LOCK; + if (snum) sk->sk_userlocks |= SOCK_BINDPORT_LOCK; isk->inet_sport = htons(isk->inet_num); isk->inet_daddr = 0; isk->inet_dport = 0; + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + memset(&inet6_sk(sk)->daddr, 0, sizeof(inet6_sk(sk)->daddr)); +#endif + sk_dst_reset(sk); out: release_sock(sk); pr_debug("ping_v4_bind -> %d\n", err); return err; } +EXPORT_SYMBOL_GPL(ping_bind); /* * Is this a supported type of ICMP message? */ -static inline int ping_supported(int type, int code) +static inline int ping_supported(int family, int type, int code) { - if (type == ICMP_ECHO && code == 0) - return 1; - return 0; + return (family == AF_INET && type == ICMP_ECHO && code == 0) || + (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0); } /* @@ -315,30 +455,42 @@ static inline int ping_supported(int type, int code) * sort of error condition. 
*/ -static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); - -void ping_err(struct sk_buff *skb, u32 info) +void ping_err(struct sk_buff *skb, int offset, u32 info) { - struct iphdr *iph = (struct iphdr *)skb->data; - struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2)); + int family; + struct icmphdr *icmph; struct inet_sock *inet_sock; - int type = icmp_hdr(skb)->type; - int code = icmp_hdr(skb)->code; + int type; + int code; struct net *net = dev_net(skb->dev); struct sock *sk; int harderr; int err; + if (skb->protocol == htons(ETH_P_IP)) { + family = AF_INET; + type = icmp_hdr(skb)->type; + code = icmp_hdr(skb)->code; + icmph = (struct icmphdr *)(skb->data + offset); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + family = AF_INET6; + type = icmp6_hdr(skb)->icmp6_type; + code = icmp6_hdr(skb)->icmp6_code; + icmph = (struct icmphdr *) (skb->data + offset); + } else { + BUG(); + } + /* We assume the packet has already been checked by icmp_unreach */ - if (!ping_supported(icmph->type, icmph->code)) + if (!ping_supported(family, icmph->type, icmph->code)) return; - pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type, - code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); + pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n", + skb->protocol, type, code, ntohs(icmph->un.echo.id), + ntohs(icmph->un.echo.sequence)); - sk = ping_v4_lookup(net, iph->daddr, iph->saddr, - ntohs(icmph->un.echo.id), skb->dev->ifindex); + sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); if (sk == NULL) { pr_debug("no socket, dropping\n"); return; /* No socket for error */ @@ -349,72 +501,83 @@ void ping_err(struct sk_buff *skb, u32 info) harderr = 0; inet_sock = inet_sk(sk); - switch (type) { - default: - case ICMP_TIME_EXCEEDED: - err = EHOSTUNREACH; - break; - case ICMP_SOURCE_QUENCH: - /* This is not a real error but ping wants to see it. - * Report it with some fake errno. */ - err = EREMOTEIO; - break; - case ICMP_PARAMETERPROB: - err = EPROTO; - harderr = 1; - break; - case ICMP_DEST_UNREACH: - if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ - ipv4_sk_update_pmtu(skb, sk, info); - if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) { - err = EMSGSIZE; - harderr = 1; - break; + if (skb->protocol == htons(ETH_P_IP)) { + switch (type) { + default: + case ICMP_TIME_EXCEEDED: + err = EHOSTUNREACH; + break; + case ICMP_SOURCE_QUENCH: + /* This is not a real error but ping wants to see it. + * Report it with some fake errno. + */ + err = EREMOTEIO; + break; + case ICMP_PARAMETERPROB: + err = EPROTO; + harderr = 1; + break; + case ICMP_DEST_UNREACH: + if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ + ipv4_sk_update_pmtu(skb, sk, info); + if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) { + err = EMSGSIZE; + harderr = 1; + break; + } + goto out; } - goto out; - } - err = EHOSTUNREACH; - if (code <= NR_ICMP_UNREACH) { - harderr = icmp_err_convert[code].fatal; - err = icmp_err_convert[code].errno; + err = EHOSTUNREACH; + if (code <= NR_ICMP_UNREACH) { + harderr = icmp_err_convert[code].fatal; + err = icmp_err_convert[code].errno; + } + break; + case ICMP_REDIRECT: + /* See ICMP_SOURCE_QUENCH */ + ipv4_sk_redirect(skb, sk); + err = EREMOTEIO; + break; } - break; - case ICMP_REDIRECT: - /* See ICMP_SOURCE_QUENCH */ - ipv4_sk_redirect(skb, sk); - err = EREMOTEIO; - break; +#if IS_ENABLED(CONFIG_IPV6) + } else if (skb->protocol == htons(ETH_P_IPV6)) { + harderr = pingv6_ops.icmpv6_err_convert(type, code, &err); +#endif } /* * RFC1122: OK. 
Passes ICMP errors back to application, as per * 4.1.3.3. */ - if (!inet_sock->recverr) { + if ((family == AF_INET && !inet_sock->recverr) || + (family == AF_INET6 && !inet6_sk(sk)->recverr)) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; } else { - ip_icmp_error(sk, skb, err, 0 /* no remote port */, - info, (u8 *)icmph); + if (family == AF_INET) { + ip_icmp_error(sk, skb, err, 0 /* no remote port */, + info, (u8 *)icmph); +#if IS_ENABLED(CONFIG_IPV6) + } else if (family == AF_INET6) { + pingv6_ops.ipv6_icmp_error(sk, skb, err, 0, + info, (u8 *)icmph); +#endif + } } sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); } +EXPORT_SYMBOL_GPL(ping_err); /* - * Copy and checksum an ICMP Echo packet from user space into a buffer. + * Copy and checksum an ICMP Echo packet from user space into a buffer + * starting from the payload. */ -struct pingfakehdr { - struct icmphdr icmph; - struct iovec *iov; - __wsum wcheck; -}; - -static int ping_getfrag(void *from, char *to, - int offset, int fraglen, int odd, struct sk_buff *skb) +int ping_getfrag(void *from, char *to, + int offset, int fraglen, int odd, struct sk_buff *skb) { struct pingfakehdr *pfh = (struct pingfakehdr *)from; @@ -425,20 +588,33 @@ static int ping_getfrag(void *from, char *to, pfh->iov, 0, fraglen - sizeof(struct icmphdr), &pfh->wcheck)) return -EFAULT; + } else if (offset < sizeof(struct icmphdr)) { + BUG(); + } else { + if (csum_partial_copy_fromiovecend + (to, pfh->iov, offset - sizeof(struct icmphdr), + fraglen, &pfh->wcheck)) + return -EFAULT; + } - return 0; +#if IS_ENABLED(CONFIG_IPV6) + /* For IPv6, checksum each skb as we go along, as expected by + * icmpv6_push_pending_frames. For IPv4, accumulate the checksum in + * wcheck, it will be finalized in ping_v4_push_pending_frames. + */ + if (pfh->family == AF_INET6) { + skb->csum = pfh->wcheck; + skb->ip_summed = CHECKSUM_NONE; + pfh->wcheck = 0; } - if (offset < sizeof(struct icmphdr)) - BUG(); - if (csum_partial_copy_fromiovecend - (to, pfh->iov, offset - sizeof(struct icmphdr), - fraglen, &pfh->wcheck)) - return -EFAULT; +#endif + return 0; } +EXPORT_SYMBOL_GPL(ping_getfrag); -static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, - struct flowi4 *fl4) +static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, + struct flowi4 *fl4) { struct sk_buff *skb = skb_peek(&sk->sk_write_queue); @@ -450,24 +626,9 @@ static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, return ip_push_pending_frames(sk, fl4); } -static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len) -{ - struct net *net = sock_net(sk); - struct flowi4 fl4; - struct inet_sock *inet = inet_sk(sk); - struct ipcm_cookie ipc; - struct icmphdr user_icmph; - struct pingfakehdr pfh; - struct rtable *rt = NULL; - struct ip_options_data opt_copy; - int free = 0; - __be32 saddr, daddr, faddr; - u8 tos; - int err; - - pr_debug("ping_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); - +int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, + void *user_icmph, size_t icmph_len) { + u8 type, code; if (len > 0xFFFF) return -EMSGSIZE; @@ -482,15 +643,53 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, /* * Fetch the ICMP header provided by the userland. - * iovec is modified! + * iovec is modified! The ICMP header is consumed. 
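For reference, a minimal userspace sketch of what this sendmsg path expects from an unprivileged ping socket. It is not part of the patch; the helper name send_echo and the 56-byte payload are invented, and it only works where net.ipv4.ping_group_range admits the caller's group. The buffer handed to the kernel begins with the echo header; the kernel overwrites the identifier with the socket's local "port" and computes the checksum itself, so those fields are left zeroed here.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <string.h>
#include <sys/socket.h>

static int send_echo(int fd, const struct sockaddr_in *dst, unsigned int seq)
{
	unsigned char buf[sizeof(struct icmphdr) + 56];
	struct icmphdr *icmp = (struct icmphdr *)buf;

	memset(buf, 0, sizeof(buf));
	icmp->type = ICMP_ECHO;	/* anything else fails ping_supported() */
	icmp->code = 0;
	icmp->un.echo.sequence = htons(seq);
	/* un.echo.id and checksum are filled in by the kernel */

	return sendto(fd, buf, sizeof(buf), 0,
		      (const struct sockaddr *)dst, sizeof(*dst));
}

The socket itself would be opened with socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP); an ICMPv6 variant differs only in address family, header type and protocol number.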
*/ - - if (memcpy_fromiovec((u8 *)&user_icmph, msg->msg_iov, - sizeof(struct icmphdr))) + if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len)) return -EFAULT; - if (!ping_supported(user_icmph.type, user_icmph.code)) + + if (family == AF_INET) { + type = ((struct icmphdr *) user_icmph)->type; + code = ((struct icmphdr *) user_icmph)->code; +#if IS_ENABLED(CONFIG_IPV6) + } else if (family == AF_INET6) { + type = ((struct icmp6hdr *) user_icmph)->icmp6_type; + code = ((struct icmp6hdr *) user_icmph)->icmp6_code; +#endif + } else { + BUG(); + } + + if (!ping_supported(family, type, code)) return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(ping_common_sendmsg); + +int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len) +{ + struct net *net = sock_net(sk); + struct flowi4 fl4; + struct inet_sock *inet = inet_sk(sk); + struct ipcm_cookie ipc; + struct icmphdr user_icmph; + struct pingfakehdr pfh; + struct rtable *rt = NULL; + struct ip_options_data opt_copy; + int free = 0; + __be32 saddr, daddr, faddr; + u8 tos; + int err; + + pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); + + err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph, + sizeof(user_icmph)); + if (err) + return err; + /* * Get and verify the address. */ @@ -595,13 +794,14 @@ back_from_confirm: pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence; pfh.iov = msg->msg_iov; pfh.wcheck = 0; + pfh.family = AF_INET; err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len, 0, &ipc, &rt, msg->msg_flags); if (err) ip_flush_pending_frames(sk); else - err = ping_push_pending_frames(sk, &pfh, &fl4); + err = ping_v4_push_pending_frames(sk, &pfh, &fl4); release_sock(sk); out: @@ -622,11 +822,13 @@ do_confirm: goto out; } -static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len) +int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *isk = inet_sk(sk); - struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; + int family = sk->sk_family; + struct sockaddr_in *sin; + struct sockaddr_in6 *sin6; struct sk_buff *skb; int copied, err; @@ -636,11 +838,22 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (flags & MSG_OOB) goto out; - if (addr_len) - *addr_len = sizeof(*sin); + if (addr_len) { + if (family == AF_INET) + *addr_len = sizeof(*sin); + else if (family == AF_INET6 && addr_len) + *addr_len = sizeof(*sin6); + } - if (flags & MSG_ERRQUEUE) - return ip_recv_error(sk, msg, len); + if (flags & MSG_ERRQUEUE) { + if (family == AF_INET) { + return ip_recv_error(sk, msg, len); +#if IS_ENABLED(CONFIG_IPV6) + } else if (family == AF_INET6) { + return pingv6_ops.ipv6_recv_error(sk, msg, len); +#endif + } + } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) @@ -659,15 +872,40 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, sock_recv_timestamp(msg, sk, skb); - /* Copy the address. */ - if (sin) { + /* Copy the address and add cmsg data. 
*/ + if (family == AF_INET) { + sin = (struct sockaddr_in *) msg->msg_name; sin->sin_family = AF_INET; sin->sin_port = 0 /* skb->h.uh->source */; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); + + if (isk->cmsg_flags) + ip_cmsg_recv(msg, skb); + +#if IS_ENABLED(CONFIG_IPV6) + } else if (family == AF_INET6) { + struct ipv6_pinfo *np = inet6_sk(sk); + struct ipv6hdr *ip6 = ipv6_hdr(skb); + sin6 = (struct sockaddr_in6 *) msg->msg_name; + sin6->sin6_family = AF_INET6; + sin6->sin6_port = 0; + sin6->sin6_addr = ip6->saddr; + + if (np->sndflow) + sin6->sin6_flowinfo = ip6_flowinfo(ip6); + + if (__ipv6_addr_needs_scope_id( + ipv6_addr_type(&sin6->sin6_addr))) + sin6->sin6_scope_id = IP6CB(skb)->iif; + + if (inet6_sk(sk)->rxopt.all) + pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb); +#endif + } else { + BUG(); } - if (isk->cmsg_flags) - ip_cmsg_recv(msg, skb); + err = copied; done: @@ -676,8 +914,9 @@ out: pr_debug("ping_recvmsg -> %d\n", err); return err; } +EXPORT_SYMBOL_GPL(ping_recvmsg); -static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", inet_sk(sk), inet_sk(sk)->inet_num, skb); @@ -688,6 +927,7 @@ static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) } return 0; } +EXPORT_SYMBOL_GPL(ping_queue_rcv_skb); /* @@ -698,10 +938,7 @@ void ping_rcv(struct sk_buff *skb) { struct sock *sk; struct net *net = dev_net(skb->dev); - struct iphdr *iph = ip_hdr(skb); struct icmphdr *icmph = icmp_hdr(skb); - __be32 saddr = iph->saddr; - __be32 daddr = iph->daddr; /* We assume the packet has already been checked by icmp_rcv */ @@ -711,8 +948,7 @@ void ping_rcv(struct sk_buff *skb) /* Push ICMP header back */ skb_push(skb, skb->data - (u8 *)icmph); - sk = ping_v4_lookup(net, saddr, daddr, ntohs(icmph->un.echo.id), - skb->dev->ifindex); + sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); if (sk != NULL) { pr_debug("rcv on socket %p\n", sk); ping_queue_rcv_skb(sk, skb_get(skb)); @@ -723,6 +959,7 @@ void ping_rcv(struct sk_buff *skb) /* We're called from icmp_rcv(). kfree_skb() is done there. 
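On the receive side, a matching userspace sketch, again illustrative rather than part of the patch (read_echo_reply and the 1500-byte buffer are invented): the data returned to the application starts with the ICMPv6 header that ping_rcv() pushed back, and the source address arrives in a sockaddr_in6 whose sin6_scope_id is filled for link-local peers, exactly as done in the AF_INET6 branch above.

#include <arpa/inet.h>
#include <netinet/icmp6.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void read_echo_reply(int fd)
{
	unsigned char buf[1500];
	struct sockaddr_in6 from;
	socklen_t fromlen = sizeof(from);
	struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)buf;
	ssize_t n;

	n = recvfrom(fd, buf, sizeof(buf), 0,
		     (struct sockaddr *)&from, &fromlen);
	if (n < (ssize_t)sizeof(*icmp6) ||
	    icmp6->icmp6_type != ICMP6_ECHO_REPLY)
		return;

	printf("seq=%u scope=%u\n",
	       ntohs(icmp6->icmp6_seq), from.sin6_scope_id);
}

Errors queued by the recverr branches above are drained separately, with recvmsg() and MSG_ERRQUEUE after enabling IP_RECVERR or IPV6_RECVERR on the socket.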
*/ } +EXPORT_SYMBOL_GPL(ping_rcv); struct proto ping_prot = { .name = "PING", @@ -733,14 +970,14 @@ struct proto ping_prot = { .disconnect = udp_disconnect, .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, - .sendmsg = ping_sendmsg, + .sendmsg = ping_v4_sendmsg, .recvmsg = ping_recvmsg, .bind = ping_bind, .backlog_rcv = ping_queue_rcv_skb, .release_cb = ip4_datagram_release_cb, - .hash = ping_v4_hash, - .unhash = ping_v4_unhash, - .get_port = ping_v4_get_port, + .hash = ping_hash, + .unhash = ping_unhash, + .get_port = ping_get_port, .obj_size = sizeof(struct inet_sock), }; EXPORT_SYMBOL(ping_prot); diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 9af088d2cdaa..470a9c008e9b 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_IPV6) += ipv6.o ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ addrlabel.o \ route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \ - raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ + raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \ exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o ipv6-offload := ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index ab5c7ad482cd..a5ac969aeefe 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -840,6 +841,9 @@ static int __init inet6_init(void) if (err) goto out_unregister_udplite_proto; + err = proto_register(&pingv6_prot, 1); + if (err) + goto out_unregister_ping_proto; /* We MUST register RAW sockets before we create the ICMP6, * IGMP6, or NDISC control sockets. @@ -930,6 +934,10 @@ static int __init inet6_init(void) if (err) goto ipv6_packet_fail; + err = pingv6_init(); + if (err) + goto pingv6_fail; + #ifdef CONFIG_SYSCTL err = ipv6_sysctl_register(); if (err) @@ -942,6 +950,8 @@ out: sysctl_fail: ipv6_packet_cleanup(); #endif +pingv6_fail: + pingv6_exit(); ipv6_packet_fail: tcpv6_exit(); tcpv6_fail: @@ -985,6 +995,8 @@ register_pernet_fail: rtnl_unregister_all(PF_INET6); out_sock_register_fail: rawv6_exit(); +out_unregister_ping_proto: + proto_unregister(&pingv6_prot); out_unregister_raw_proto: proto_unregister(&rawv6_prot); out_unregister_udplite_proto: diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index b4ff0a42b8c7..1d2902e61786 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -57,6 +57,7 @@ #include #include +#include #include #include #include @@ -84,12 +85,18 @@ static inline struct sock *icmpv6_sk(struct net *net) static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { + /* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */ + struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset); struct net *net = dev_net(skb->dev); if (type == ICMPV6_PKT_TOOBIG) ip6_update_pmtu(skb, net, info, 0, 0); else if (type == NDISC_REDIRECT) ip6_redirect(skb, net, 0, 0); + + if (!(type & ICMPV6_INFOMSG_MASK)) + if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST) + ping_err(skb, offset, info); } static int icmpv6_rcv(struct sk_buff *skb); @@ -224,7 +231,8 @@ static bool opt_unrec(struct sk_buff *skb, __u32 offset) return (*op & 0xC0) == 0x80; } -static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len) +int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, + struct icmp6hdr *thdr, int len) { struct sk_buff *skb; struct icmp6hdr *icmp6h; @@ -307,8 
+315,8 @@ static void mip6_addr_swap(struct sk_buff *skb) static inline void mip6_addr_swap(struct sk_buff *skb) {} #endif -static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb, - struct sock *sk, struct flowi6 *fl6) +struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb, + struct sock *sk, struct flowi6 *fl6) { struct dst_entry *dst, *dst2; struct flowi6 fl2; @@ -697,7 +705,8 @@ static int icmpv6_rcv(struct sk_buff *skb) skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, 0)); if (__skb_checksum_complete(skb)) { - LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n", + LIMIT_NETDEBUG(KERN_DEBUG + "ICMPv6 checksum failed [%pI6c > %pI6c]\n", saddr, daddr); goto csum_error; } @@ -718,7 +727,7 @@ static int icmpv6_rcv(struct sk_buff *skb) break; case ICMPV6_ECHO_REPLY: - /* we couldn't care less */ + ping_rcv(skb); break; case ICMPV6_PKT_TOOBIG: diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c new file mode 100644 index 000000000000..a6462d657c15 --- /dev/null +++ b/net/ipv6/ping.c @@ -0,0 +1,216 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * "Ping" sockets + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Based on ipv4/ping.c code. + * + * Authors: Lorenzo Colitti (IPv6 support) + * Vasiliy Kulikov / Openwall (IPv4 implementation, for Linux 2.6), + * Pavel Kankovsky (IPv4 implementation, for Linux 2.4.32) + * + */ + +#include +#include +#include +#include +#include +#include +#include + +struct proto pingv6_prot = { + .name = "PINGv6", + .owner = THIS_MODULE, + .init = ping_init_sock, + .close = ping_close, + .connect = ip6_datagram_connect, + .disconnect = udp_disconnect, + .setsockopt = ipv6_setsockopt, + .getsockopt = ipv6_getsockopt, + .sendmsg = ping_v6_sendmsg, + .recvmsg = ping_recvmsg, + .bind = ping_bind, + .backlog_rcv = ping_queue_rcv_skb, + .hash = ping_hash, + .unhash = ping_unhash, + .get_port = ping_get_port, + .obj_size = sizeof(struct raw6_sock), +}; +EXPORT_SYMBOL_GPL(pingv6_prot); + +static struct inet_protosw pingv6_protosw = { + .type = SOCK_DGRAM, + .protocol = IPPROTO_ICMPV6, + .prot = &pingv6_prot, + .ops = &inet6_dgram_ops, + .no_check = UDP_CSUM_DEFAULT, + .flags = INET_PROTOSW_REUSE, +}; + + +/* Compatibility glue so we can support IPv6 when it's compiled as a module */ +int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) +{ + return -EAFNOSUPPORT; +} +int dummy_ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, + struct sk_buff *skb) +{ + return -EAFNOSUPPORT; +} +int dummy_icmpv6_err_convert(u8 type, u8 code, int *err) +{ + return -EAFNOSUPPORT; +} +void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, + __be16 port, u32 info, u8 *payload) {} +int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, + struct net_device *dev, int strict) +{ + return 0; +} + +int __init pingv6_init(void) +{ + pingv6_ops.ipv6_recv_error = ipv6_recv_error; + pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl; + pingv6_ops.icmpv6_err_convert = icmpv6_err_convert; + pingv6_ops.ipv6_icmp_error = ipv6_icmp_error; + pingv6_ops.ipv6_chk_addr = ipv6_chk_addr; 
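	/* From this point on, the family-agnostic code in net/ipv4/ping.c
	 * reaches these IPv6 helpers only through pingv6_ops, so that code
	 * keeps working when IPv6 is built as a module; pingv6_exit() below
	 * restores the dummy_* stubs defined above.
	 */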
+ return inet6_register_protosw(&pingv6_protosw); +} + +/* This never gets called because it's not possible to unload the ipv6 module, + * but just in case. + */ +void pingv6_exit(void) +{ + pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error; + pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl; + pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert; + pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error; + pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr; + inet6_unregister_protosw(&pingv6_protosw); +} + +int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, + size_t len) +{ + struct inet_sock *inet = inet_sk(sk); + struct ipv6_pinfo *np = inet6_sk(sk); + struct icmp6hdr user_icmph; + int addr_type; + struct in6_addr *daddr; + int iif = 0; + struct flowi6 fl6; + int err; + int hlimit; + struct dst_entry *dst; + struct rt6_info *rt; + struct pingfakehdr pfh; + + pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); + + err = ping_common_sendmsg(AF_INET6, msg, len, &user_icmph, + sizeof(user_icmph)); + if (err) + return err; + + if (msg->msg_name) { + struct sockaddr_in6 *u = (struct sockaddr_in6 *) msg->msg_name; + if (msg->msg_namelen < sizeof(struct sockaddr_in6) || + u->sin6_family != AF_INET6) { + return -EINVAL; + } + if (sk->sk_bound_dev_if && + sk->sk_bound_dev_if != u->sin6_scope_id) { + return -EINVAL; + } + daddr = &(u->sin6_addr); + iif = u->sin6_scope_id; + } else { + if (sk->sk_state != TCP_ESTABLISHED) + return -EDESTADDRREQ; + daddr = &np->daddr; + } + + if (!iif) + iif = sk->sk_bound_dev_if; + + addr_type = ipv6_addr_type(daddr); + if (__ipv6_addr_needs_scope_id(addr_type) && !iif) + return -EINVAL; + if (addr_type & IPV6_ADDR_MAPPED) + return -EINVAL; + + /* TODO: use ip6_datagram_send_ctl to get options from cmsg */ + + memset(&fl6, 0, sizeof(fl6)); + + fl6.flowi6_proto = IPPROTO_ICMPV6; + fl6.saddr = np->saddr; + fl6.daddr = *daddr; + fl6.fl6_icmp_type = user_icmph.icmp6_type; + fl6.fl6_icmp_code = user_icmph.icmp6_code; + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); + + if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) + fl6.flowi6_oif = np->mcast_oif; + else if (!fl6.flowi6_oif) + fl6.flowi6_oif = np->ucast_oif; + + dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, 1); + if (IS_ERR(dst)) + return PTR_ERR(dst); + rt = (struct rt6_info *) dst; + + np = inet6_sk(sk); + if (!np) + return -EBADF; + + if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) + fl6.flowi6_oif = np->mcast_oif; + else if (!fl6.flowi6_oif) + fl6.flowi6_oif = np->ucast_oif; + + pfh.icmph.type = user_icmph.icmp6_type; + pfh.icmph.code = user_icmph.icmp6_code; + pfh.icmph.checksum = 0; + pfh.icmph.un.echo.id = inet->inet_sport; + pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence; + pfh.iov = msg->msg_iov; + pfh.wcheck = 0; + pfh.family = AF_INET6; + + if (ipv6_addr_is_multicast(&fl6.daddr)) + hlimit = np->mcast_hops; + else + hlimit = np->hop_limit; + if (hlimit < 0) + hlimit = ip6_dst_hoplimit(dst); + + err = ip6_append_data(sk, ping_getfrag, &pfh, len, + 0, hlimit, + np->tclass, NULL, &fl6, rt, + MSG_DONTWAIT, np->dontfrag); + + if (err) { + ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev, + ICMP6_MIB_OUTERRORS); + ip6_flush_pending_frames(sk); + } else { + err = icmpv6_push_pending_frames(sk, &fl6, + (struct icmp6hdr *) &pfh.icmph, + len); + } + + return err; +} -- cgit v1.2.3 From 42e52bf9e3ae80fd44b21ddfcd64c54e6db2ff76 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 25 May 2013 04:12:10 +0000 Subject: net: add 
netnotifier event for upper device change Now when upper device is changed, event is not propagated via RT Netlink to userspace. Userspace might never now about the change. Fix this by adding upper-device-change notifier event. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/linux/netdevice.h | 1 + net/core/dev.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0ebd63ae2cc8..ea7b6bce9ea0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1593,6 +1593,7 @@ struct packet_offload { #define NETDEV_RELEASE 0x0012 #define NETDEV_NOTIFY_PEERS 0x0013 #define NETDEV_JOIN 0x0014 +#define NETDEV_CHANGEUPPER 0x0015 extern int register_netdevice_notifier(struct notifier_block *nb); extern int unregister_netdevice_notifier(struct notifier_block *nb); diff --git a/net/core/dev.c b/net/core/dev.c index 7229bc30e509..50c02ded1d69 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4411,7 +4411,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, else list_add_tail_rcu(&upper->list, &dev->upper_dev_list); dev_hold(upper_dev); - + call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev); return 0; } @@ -4471,6 +4471,7 @@ void netdev_upper_dev_unlink(struct net_device *dev, list_del_rcu(&upper->list); dev_put(upper_dev); kfree_rcu(upper, rcu); + call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev); } EXPORT_SYMBOL(netdev_upper_dev_unlink); -- cgit v1.2.3 From 6abb9cb99f33b20c2f32f18a3ae9cc7543e46edb Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 15 May 2013 09:30:07 +0200 Subject: cfg80211: make WoWLAN configuration available to drivers Make the current WoWLAN configuration available to drivers at runtime. This isn't really useful for the normal WoWLAN behaviour and accessing it can also be racy, but drivers may use it for testing the WoWLAN device behaviour while the host stays up & running to observe the device. Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 4 ++++ net/wireless/core.c | 4 +++- net/wireless/core.h | 21 ++++++++++---------- net/wireless/nl80211.c | 53 ++++++++++++++++++++++++++------------------------ net/wireless/sysfs.c | 8 ++++++-- 5 files changed, 52 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9f45d74ce3c2..b3b076a46d50 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2583,6 +2583,9 @@ struct wiphy_wowlan_support { * may request, if implemented. * * @wowlan: WoWLAN support information + * @wowlan_config: current WoWLAN configuration; this should usually not be + * used since access to it is necessarily racy, use the parameter passed + * to the suspend() operation instead. * * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features. * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden. 
@@ -2650,6 +2653,7 @@ struct wiphy { #ifdef CONFIG_PM struct wiphy_wowlan_support wowlan; + struct cfg80211_wowlan *wowlan_config; #endif u16 max_remain_on_channel_duration; diff --git a/net/wireless/core.c b/net/wireless/core.c index ee422871fe9e..41cec1776f4f 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -663,8 +663,10 @@ void wiphy_unregister(struct wiphy *wiphy) flush_work(&rdev->event_work); cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); - if (rdev->wowlan && rdev->ops->set_wakeup) +#ifdef CONFIG_PM + if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) rdev_set_wakeup(rdev, false); +#endif cfg80211_rdev_free_wowlan(rdev); } EXPORT_SYMBOL(wiphy_unregister); diff --git a/net/wireless/core.h b/net/wireless/core.h index b4b4a566626b..a65eaf8a84c1 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -74,8 +74,6 @@ struct cfg80211_registered_device { struct work_struct conn_work; struct work_struct event_work; - struct cfg80211_wowlan *wowlan; - struct delayed_work dfs_update_channels_wk; /* netlink port which started critical protocol (0 means not started) */ @@ -96,17 +94,20 @@ struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy) static inline void cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev) { +#ifdef CONFIG_PM int i; - if (!rdev->wowlan) + if (!rdev->wiphy.wowlan_config) return; - for (i = 0; i < rdev->wowlan->n_patterns; i++) - kfree(rdev->wowlan->patterns[i].mask); - kfree(rdev->wowlan->patterns); - if (rdev->wowlan->tcp && rdev->wowlan->tcp->sock) - sock_release(rdev->wowlan->tcp->sock); - kfree(rdev->wowlan->tcp); - kfree(rdev->wowlan); + for (i = 0; i < rdev->wiphy.wowlan_config->n_patterns; i++) + kfree(rdev->wiphy.wowlan_config->patterns[i].mask); + kfree(rdev->wiphy.wowlan_config->patterns); + if (rdev->wiphy.wowlan_config->tcp && + rdev->wiphy.wowlan_config->tcp->sock) + sock_release(rdev->wiphy.wowlan_config->tcp->sock); + kfree(rdev->wiphy.wowlan_config->tcp); + kfree(rdev->wiphy.wowlan_config); +#endif } extern struct workqueue_struct *cfg80211_wq; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a09f36bb957c..fb6abcb359a1 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -7489,28 +7489,29 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info) static int nl80211_send_wowlan_patterns(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { + struct cfg80211_wowlan *wowlan = rdev->wiphy.wowlan_config; struct nlattr *nl_pats, *nl_pat; int i, pat_len; - if (!rdev->wowlan->n_patterns) + if (!wowlan->n_patterns) return 0; nl_pats = nla_nest_start(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN); if (!nl_pats) return -ENOBUFS; - for (i = 0; i < rdev->wowlan->n_patterns; i++) { + for (i = 0; i < wowlan->n_patterns; i++) { nl_pat = nla_nest_start(msg, i + 1); if (!nl_pat) return -ENOBUFS; - pat_len = rdev->wowlan->patterns[i].pattern_len; + pat_len = wowlan->patterns[i].pattern_len; if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8), - rdev->wowlan->patterns[i].mask) || + wowlan->patterns[i].mask) || nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN, - pat_len, rdev->wowlan->patterns[i].pattern) || + pat_len, wowlan->patterns[i].pattern) || nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET, - rdev->wowlan->patterns[i].pkt_offset)) + wowlan->patterns[i].pkt_offset)) return -ENOBUFS; nla_nest_end(msg, nl_pat); } @@ -7573,12 +7574,12 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) !rdev->wiphy.wowlan.tcp) return 
-EOPNOTSUPP; - if (rdev->wowlan && rdev->wowlan->tcp) { + if (rdev->wiphy.wowlan_config && rdev->wiphy.wowlan_config->tcp) { /* adjust size to have room for all the data */ - size += rdev->wowlan->tcp->tokens_size + - rdev->wowlan->tcp->payload_len + - rdev->wowlan->tcp->wake_len + - rdev->wowlan->tcp->wake_len / 8; + size += rdev->wiphy.wowlan_config->tcp->tokens_size + + rdev->wiphy.wowlan_config->tcp->payload_len + + rdev->wiphy.wowlan_config->tcp->wake_len + + rdev->wiphy.wowlan_config->tcp->wake_len / 8; } msg = nlmsg_new(size, GFP_KERNEL); @@ -7590,33 +7591,34 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) if (!hdr) goto nla_put_failure; - if (rdev->wowlan) { + if (rdev->wiphy.wowlan_config) { struct nlattr *nl_wowlan; nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); if (!nl_wowlan) goto nla_put_failure; - if ((rdev->wowlan->any && + if ((rdev->wiphy.wowlan_config->any && nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || - (rdev->wowlan->disconnect && + (rdev->wiphy.wowlan_config->disconnect && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || - (rdev->wowlan->magic_pkt && + (rdev->wiphy.wowlan_config->magic_pkt && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || - (rdev->wowlan->gtk_rekey_failure && + (rdev->wiphy.wowlan_config->gtk_rekey_failure && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || - (rdev->wowlan->eap_identity_req && + (rdev->wiphy.wowlan_config->eap_identity_req && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || - (rdev->wowlan->four_way_handshake && + (rdev->wiphy.wowlan_config->four_way_handshake && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || - (rdev->wowlan->rfkill_release && + (rdev->wiphy.wowlan_config->rfkill_release && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) goto nla_put_failure; if (nl80211_send_wowlan_patterns(msg, rdev)) goto nla_put_failure; - if (nl80211_send_wowlan_tcp(msg, rdev->wowlan->tcp)) + if (nl80211_send_wowlan_tcp(msg, + rdev->wiphy.wowlan_config->tcp)) goto nla_put_failure; nla_nest_end(msg, nl_wowlan); @@ -7783,7 +7785,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) struct cfg80211_wowlan *ntrig; struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; int err, i; - bool prev_enabled = rdev->wowlan; + bool prev_enabled = rdev->wiphy.wowlan_config; if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns && !rdev->wiphy.wowlan.tcp) @@ -7791,7 +7793,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) { cfg80211_rdev_free_wowlan(rdev); - rdev->wowlan = NULL; + rdev->wiphy.wowlan_config = NULL; goto set_wakeup; } @@ -7927,11 +7929,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) goto error; } cfg80211_rdev_free_wowlan(rdev); - rdev->wowlan = ntrig; + rdev->wiphy.wowlan_config = ntrig; set_wakeup: - if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan) - rdev_set_wakeup(rdev, rdev->wowlan); + if (rdev->ops->set_wakeup && + prev_enabled != !!rdev->wiphy.wowlan_config) + rdev_set_wakeup(rdev, rdev->wiphy.wowlan_config); return 0; error: diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 8f28b9f798d8..360a42c6f694 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -91,6 +91,7 @@ static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) cfg80211_leave(rdev, wdev); } +#ifdef CONFIG_PM static int wiphy_suspend(struct device *dev, pm_message_t state) { struct 
cfg80211_registered_device *rdev = dev_to_rdev(dev); @@ -100,10 +101,10 @@ static int wiphy_suspend(struct device *dev, pm_message_t state) rtnl_lock(); if (rdev->wiphy.registered) { - if (!rdev->wowlan) + if (!rdev->wiphy.wowlan_config) cfg80211_leave_all(rdev); if (rdev->ops->suspend) - ret = rdev_suspend(rdev, rdev->wowlan); + ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); if (ret == 1) { /* Driver refuse to configure wowlan */ cfg80211_leave_all(rdev); @@ -132,6 +133,7 @@ static int wiphy_resume(struct device *dev) return ret; } +#endif static const void *wiphy_namespace(struct device *d) { @@ -146,8 +148,10 @@ struct class ieee80211_class = { .dev_release = wiphy_dev_release, .dev_attrs = ieee80211_dev_attrs, .dev_uevent = wiphy_uevent, +#ifdef CONFIG_PM .suspend = wiphy_suspend, .resume = wiphy_resume, +#endif .ns_type = &net_ns_type_operations, .namespace = wiphy_namespace, }; -- cgit v1.2.3 From 7ec872114251b618adf5a2688de0ca00de63635d Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 23 May 2013 01:11:11 +0000 Subject: net: ethtool: disambiguate XCVR_* meaning Add a comment which explains the real meaning of XCVR_INTERNAL (PHY and Ethernet MAC in the same package/die) and XCVR_EXTERNAL (PHY and Ethernet MAC in a different package/die). Most if not all of the drivers setting their transceiver type already do it the way the comment describes it. Signed-off-by: Florian Fainelli Reviewed-by: Ben Hutchings Signed-off-by: David S. Miller --- include/uapi/linux/ethtool.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 0c9b44871df0..38dbafaa5341 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -993,8 +993,8 @@ enum ethtool_sfeatures_retval_bits { #define PORT_OTHER 0xff /* Which transceiver to use. */ -#define XCVR_INTERNAL 0x00 -#define XCVR_EXTERNAL 0x01 +#define XCVR_INTERNAL 0x00 /* PHY and MAC are in the same package */ +#define XCVR_EXTERNAL 0x01 /* PHY and MAC are in different packages */ #define XCVR_DUMMY1 0x02 #define XCVR_DUMMY2 0x03 #define XCVR_DUMMY3 0x04 -- cgit v1.2.3 From 4284b6a535a9aab33e5f3c37929143508dd2ee60 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 23 May 2013 01:11:12 +0000 Subject: phy: allow drivers to flag a PHY device as internal libphy currently always reports a PHY as an external transceiver from the ethtool output. This is inaccurate, because some drivers should be able to tell that a PHY device is an internal transceiver of an Ethernet MAC. Add a new flag (PHY_IS_INTERNAL) which can be set by PHY drivers just like other flags, and a corresponding helper: phy_is_internal() which can be used by networking drivers to query if a given PHY device is internal. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 3 ++- drivers/net/phy/phy_device.c | 3 +++ include/linux/phy.h | 12 ++++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 2d28a0ef4572..b2a94e436ed8 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -294,7 +294,8 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) cmd->duplex = phydev->duplex; cmd->port = PORT_MII; cmd->phy_address = phydev->addr; - cmd->transceiver = XCVR_EXTERNAL; + cmd->transceiver = phy_is_internal(phydev) ? 
+ XCVR_INTERNAL : XCVR_EXTERNAL; cmd->autoneg = phydev->autoneg; return 0; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b55aa33a5b8b..74630e94fa3b 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1017,6 +1017,9 @@ static int phy_probe(struct device *dev) phy_interrupt_is_valid(phydev)) phydev->irq = PHY_POLL; + if (phydrv->flags & PHY_IS_INTERNAL) + phydev->is_internal = true; + mutex_lock(&phydev->lock); /* Start out supporting everything. Eventually, diff --git a/include/linux/phy.h b/include/linux/phy.h index fdfa11542974..ee411b0eef5d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -49,6 +49,7 @@ #define PHY_HAS_INTERRUPT 0x00000001 #define PHY_HAS_MAGICANEG 0x00000002 +#define PHY_IS_INTERNAL 0x00000004 /* Interface Mode definitions */ typedef enum { @@ -261,6 +262,7 @@ struct phy_c45_device_ids { * phy_id: UID for this device found during discovery * c45_ids: 802.3-c45 Device Identifers if is_c45. * is_c45: Set to true if this phy uses clause 45 addressing. + * is_internal: Set to true if this phy is internal to a MAC. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. * addr: Bus address of PHY @@ -298,6 +300,7 @@ struct phy_device { struct phy_c45_device_ids c45_ids; bool is_c45; + bool is_internal; enum phy_state state; @@ -520,6 +523,15 @@ static inline bool phy_interrupt_is_valid(struct phy_device *phydev) return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT; } +/** + * phy_is_internal - Convenience function for testing if a PHY is internal + * @phydev: the phy_device struct + */ +static inline bool phy_is_internal(struct phy_device *phydev) +{ + return phydev->is_internal; +} + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); -- cgit v1.2.3 From 1a37e412a0225fcba5587f24c0dfc7636efc8b69 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Thu, 23 May 2013 21:02:51 +0000 Subject: net: Use 16bits for *_headers fields of struct skbuff In order to mitigate ongoing incresase in the size of struct skbuff use 16 bit integer offsets rather than pointers for inner_*_headers. This appears to reduce the size of struct skbuff from 0xd0 to 0xc0 bytes on x86_64 with the following all unset. CONFIG_XFRM CONFIG_NF_CONNTRACK CONFIG_NF_CONNTRACK_MODULE NET_SKBUFF_NF_DEFRAG_NEEDED CONFIG_BRIDGE_NETFILTER CONFIG_NET_SCHED CONFIG_IPV6_NDISC_NODETYPE CONFIG_NET_DMA CONFIG_NETWORK_SECMARK Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- include/linux/skbuff.h | 119 +++---------------------------------------------- 1 file changed, 6 insertions(+), 113 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2e0ced1af3b1..5663e3592784 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -509,12 +509,12 @@ struct sk_buff { __u32 reserved_tailroom; }; - sk_buff_data_t inner_transport_header; - sk_buff_data_t inner_network_header; - sk_buff_data_t inner_mac_header; - sk_buff_data_t transport_header; - sk_buff_data_t network_header; - sk_buff_data_t mac_header; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; /* These elements must be at the end, see alloc_skb() for details. 
*/ sk_buff_data_t tail; sk_buff_data_t end; @@ -1527,7 +1527,6 @@ static inline void skb_reset_mac_len(struct sk_buff *skb) skb->mac_len = skb->network_header - skb->mac_header; } -#ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_inner_transport_header(const struct sk_buff *skb) { @@ -1638,112 +1637,6 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) skb->mac_header += offset; } -#else /* NET_SKBUFF_DATA_USES_OFFSET */ -static inline unsigned char *skb_inner_transport_header(const struct sk_buff - *skb) -{ - return skb->inner_transport_header; -} - -static inline void skb_reset_inner_transport_header(struct sk_buff *skb) -{ - skb->inner_transport_header = skb->data; -} - -static inline void skb_set_inner_transport_header(struct sk_buff *skb, - const int offset) -{ - skb->inner_transport_header = skb->data + offset; -} - -static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) -{ - return skb->inner_network_header; -} - -static inline void skb_reset_inner_network_header(struct sk_buff *skb) -{ - skb->inner_network_header = skb->data; -} - -static inline void skb_set_inner_network_header(struct sk_buff *skb, - const int offset) -{ - skb->inner_network_header = skb->data + offset; -} - -static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) -{ - return skb->inner_mac_header; -} - -static inline void skb_reset_inner_mac_header(struct sk_buff *skb) -{ - skb->inner_mac_header = skb->data; -} - -static inline void skb_set_inner_mac_header(struct sk_buff *skb, - const int offset) -{ - skb->inner_mac_header = skb->data + offset; -} -static inline bool skb_transport_header_was_set(const struct sk_buff *skb) -{ - return skb->transport_header != NULL; -} - -static inline unsigned char *skb_transport_header(const struct sk_buff *skb) -{ - return skb->transport_header; -} - -static inline void skb_reset_transport_header(struct sk_buff *skb) -{ - skb->transport_header = skb->data; -} - -static inline void skb_set_transport_header(struct sk_buff *skb, - const int offset) -{ - skb->transport_header = skb->data + offset; -} - -static inline unsigned char *skb_network_header(const struct sk_buff *skb) -{ - return skb->network_header; -} - -static inline void skb_reset_network_header(struct sk_buff *skb) -{ - skb->network_header = skb->data; -} - -static inline void skb_set_network_header(struct sk_buff *skb, const int offset) -{ - skb->network_header = skb->data + offset; -} - -static inline unsigned char *skb_mac_header(const struct sk_buff *skb) -{ - return skb->mac_header; -} - -static inline int skb_mac_header_was_set(const struct sk_buff *skb) -{ - return skb->mac_header != NULL; -} - -static inline void skb_reset_mac_header(struct sk_buff *skb) -{ - skb->mac_header = skb->data; -} - -static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) -{ - skb->mac_header = skb->data + offset; -} -#endif /* NET_SKBUFF_DATA_USES_OFFSET */ - static inline void skb_probe_transport_header(struct sk_buff *skb, const int offset_hint) { -- cgit v1.2.3 From 0d89d2035fe063461a5ddb609b2c12e7fb006e44 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Thu, 23 May 2013 21:02:52 +0000 Subject: MPLS: Add limited GSO support In the case where a non-MPLS packet is received and an MPLS stack is added it may well be the case that the original skb is GSO but the NIC used for transmit does not support GSO of MPLS packets. The aim of this code is to provide GSO in software for MPLS packets whose skbs are GSO. 
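A hedged sketch of what that means for skb metadata, as orientation for the SKB usage rules spelled out just below. The helper name example_push_mpls_metadata is invented, this is not the Open vSwitch push_mpls action itself, and the buffer manipulation that actually makes room for and writes the label stack entry is omitted.

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static void example_push_mpls_metadata(struct sk_buff *skb)
{
	/* remember the pre-MPLS ethertype so mpls_gso_segment() can
	 * temporarily restore it when segmenting the inner packet
	 */
	skb->inner_protocol = skb->protocol;

	/* the frame now carries an MPLS label stack */
	skb->protocol = htons(ETH_P_MPLS_UC);

	/* skb->network_header must also be adjusted to account for the
	 * pushed label stack, as spelled out in the notes below
	 */
}

At transmit time the handler in net/mpls/mpls_gso.c swaps skb->protocol back to skb->inner_protocol, segments the inner packet against dev->mpls_features, and then restores the MPLS ethertype.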
SKB Usage: When an implementation adds an MPLS stack to a non-MPLS packet it should do the following to skb metadata: * Set skb->inner_protocol to the old non-MPLS ethertype of the packet. skb->inner_protocol is added by this patch. * Set skb->protocol to the new MPLS ethertype of the packet. * Set skb->network_header to correspond to the end of the L3 header, including the MPLS label stack. I have posted a patch, "[PATCH v3.29] datapath: Add basic MPLS support to kernel" which adds MPLS support to the kernel datapath of Open vSwtich. That patch sets the above requirements in datapath/actions.c:push_mpls() and was used to exercise this code. The datapath patch is against the Open vSwtich tree but it is intended that it be added to the Open vSwtich code present in the mainline Linux kernel at some point. Features: I believe that the approach that I have taken is at least partially consistent with the handling of other protocols. Jesse, I understand that you have some ideas here. I am more than happy to change my implementation. This patch adds dev->mpls_features which may be used by devices to advertise features supported for MPLS packets. A new NETIF_F_MPLS_GSO feature is added for devices which support hardware MPLS GSO offload. Currently no devices support this and MPLS GSO always falls back to software. Alternate Implementation: One possible alternate implementation is to teach netif_skb_features() and skb_network_protocol() about MPLS, in a similar way to their understanding of VLANs. I believe this would avoid the need for net/mpls/mpls_gso.c and in particular the calls to __skb_push() and __skb_push() in mpls_gso_segment(). I have decided on the implementation in this patch as it should not introduce any overhead in the case where mpls_gso is not compiled into the kernel or inserted as a module. MPLS GSO suggested by Jesse Gross. Based in part on "v4 GRE: Add TCP segmentation offload for GRE" by Pravin B Shelar. Cc: Jesse Gross Cc: Pravin B Shelar Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- include/linux/netdev_features.h | 4 +- include/linux/netdevice.h | 2 + include/linux/skbuff.h | 4 ++ net/Kconfig | 1 + net/Makefile | 1 + net/core/dev.c | 4 ++ net/core/ethtool.c | 1 + net/ipv4/af_inet.c | 1 + net/ipv4/tcp.c | 1 + net/ipv4/udp.c | 2 +- net/ipv6/ip6_offload.c | 1 + net/ipv6/udp_offload.c | 3 +- net/mpls/Kconfig | 9 ++++ net/mpls/Makefile | 4 ++ net/mpls/mpls_gso.c | 108 ++++++++++++++++++++++++++++++++++++++++ 15 files changed, 143 insertions(+), 3 deletions(-) create mode 100644 net/mpls/Kconfig create mode 100644 net/mpls/Makefile create mode 100644 net/mpls/mpls_gso.c (limited to 'include') diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 09906b7ca47d..a2a89a5c7be5 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -43,8 +43,9 @@ enum { NETIF_F_FSO_BIT, /* ... FCoE segmentation */ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ + NETIF_F_GSO_MPLS_BIT, /* ... 
MPLS segmentation */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_UDP_TUNNEL_BIT, + NETIF_F_GSO_MPLS_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ @@ -107,6 +108,7 @@ enum { #define NETIF_F_RXALL __NETIF_F(RXALL) #define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) +#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ea7b6bce9ea0..6b2bb460d1d7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1088,6 +1088,8 @@ struct net_device { * need to set them appropriately. */ netdev_features_t hw_enc_features; + /* mask of fetures inheritable by MPLS */ + netdev_features_t mpls_features; /* Interface index. Unique device identifier */ int ifindex; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5663e3592784..8f2b830772a8 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -319,6 +319,8 @@ enum { SKB_GSO_GRE = 1 << 6, SKB_GSO_UDP_TUNNEL = 1 << 7, + + SKB_GSO_MPLS = 1 << 8, }; #if BITS_PER_LONG > 32 @@ -389,6 +391,7 @@ typedef unsigned char *sk_buff_data_t; * @dropcount: total number of sk_receive_queue overflows * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information + * @inner_protocol: Protocol (encapsulation) * @inner_transport_header: Inner transport layer header (encapsulation) * @inner_network_header: Network layer header (encapsulation) * @inner_mac_header: Link layer header (encapsulation) @@ -509,6 +512,7 @@ struct sk_buff { __u32 reserved_tailroom; }; + __be16 inner_protocol; __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; diff --git a/net/Kconfig b/net/Kconfig index 08de901415ee..523e43e6da1b 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -218,6 +218,7 @@ source "net/batman-adv/Kconfig" source "net/openvswitch/Kconfig" source "net/vmw_vsock/Kconfig" source "net/netlink/Kconfig" +source "net/mpls/Kconfig" config RPS boolean diff --git a/net/Makefile b/net/Makefile index 091e7b04f301..9492e8cb64e9 100644 --- a/net/Makefile +++ b/net/Makefile @@ -70,3 +70,4 @@ obj-$(CONFIG_BATMAN_ADV) += batman-adv/ obj-$(CONFIG_NFC) += nfc/ obj-$(CONFIG_OPENVSWITCH) += openvswitch/ obj-$(CONFIG_VSOCKETS) += vmw_vsock/ +obj-$(CONFIG_NET_MPLS_GSO) += mpls/ diff --git a/net/core/dev.c b/net/core/dev.c index 50c02ded1d69..2f09cb29cc95 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5277,6 +5277,10 @@ int register_netdevice(struct net_device *dev) */ dev->hw_enc_features |= NETIF_F_SG; + /* Make NETIF_F_SG inheritable to MPLS. 
+ */ + dev->mpls_features |= NETIF_F_SG; + ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); ret = notifier_to_errno(ret); if (ret) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 22efdaa76ebf..4e6f63ade741 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -82,6 +82,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", + [NETIF_F_GSO_MPLS_BIT] = "tx-mpls-segmentation", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index d01be2a3ae53..b05ae96aec44 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1295,6 +1295,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, SKB_GSO_GRE | SKB_GSO_TCPV6 | SKB_GSO_UDP_TUNNEL | + SKB_GSO_MPLS | 0))) goto out; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d87ce72ca8aa..ba4186e1dca9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2917,6 +2917,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, SKB_GSO_TCP_ECN | SKB_GSO_TCPV6 | SKB_GSO_GRE | + SKB_GSO_MPLS | SKB_GSO_UDP_TUNNEL | 0) || !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 0bf5d399a03c..aa5eff46d137 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2381,7 +2381,7 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_UDP_TUNNEL | - SKB_GSO_GRE) || + SKB_GSO_GRE | SKB_GSO_MPLS) || !(type & (SKB_GSO_UDP)))) goto out; diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 71b766ee821d..a263b990ee11 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -98,6 +98,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, SKB_GSO_TCP_ECN | SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL | + SKB_GSO_MPLS | SKB_GSO_TCPV6 | 0))) goto out; diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 3bb3a891a424..76d401a93c7a 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -63,7 +63,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_UDP_TUNNEL | - SKB_GSO_GRE) || + SKB_GSO_GRE | + SKB_GSO_MPLS) || !(type & (SKB_GSO_UDP)))) goto out; diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig new file mode 100644 index 000000000000..37421db88965 --- /dev/null +++ b/net/mpls/Kconfig @@ -0,0 +1,9 @@ +# +# MPLS configuration +# +config NET_MPLS_GSO + tristate "MPLS: GSO support" + help + This is helper module to allow segmentation of non-MPLS GSO packets + that have had MPLS stack entries pushed onto them and thus + become MPLS GSO packets. diff --git a/net/mpls/Makefile b/net/mpls/Makefile new file mode 100644 index 000000000000..0a3c171be537 --- /dev/null +++ b/net/mpls/Makefile @@ -0,0 +1,4 @@ +# +# Makefile for MPLS. +# +obj-y += mpls_gso.o diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c new file mode 100644 index 000000000000..1bec1219ab81 --- /dev/null +++ b/net/mpls/mpls_gso.c @@ -0,0 +1,108 @@ +/* + * MPLS GSO Support + * + * Authors: Simon Horman (horms@verge.net.au) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * Based on: GSO portions of net/ipv4/gre.c + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + netdev_features_t mpls_features; + __be16 mpls_protocol; + + if (unlikely(skb_shinfo(skb)->gso_type & + ~(SKB_GSO_TCPV4 | + SKB_GSO_TCPV6 | + SKB_GSO_UDP | + SKB_GSO_DODGY | + SKB_GSO_TCP_ECN | + SKB_GSO_GRE | + SKB_GSO_MPLS))) + goto out; + + /* Setup inner SKB. */ + mpls_protocol = skb->protocol; + skb->protocol = skb->inner_protocol; + + /* Push back the mac header that skb_mac_gso_segment() has pulled. + * It will be re-pulled by the call to skb_mac_gso_segment() below + */ + __skb_push(skb, skb->mac_len); + + /* Segment inner packet. */ + mpls_features = skb->dev->mpls_features & netif_skb_features(skb); + segs = skb_mac_gso_segment(skb, mpls_features); + + + /* Restore outer protocol. */ + skb->protocol = mpls_protocol; + + /* Re-pull the mac header that the call to skb_mac_gso_segment() + * above pulled. It will be re-pushed after returning + * skb_mac_gso_segment(), an indirect caller of this function. + */ + __skb_push(skb, skb->data - skb_mac_header(skb)); + +out: + return segs; +} + +static int mpls_gso_send_check(struct sk_buff *skb) +{ + return 0; +} + +static struct packet_offload mpls_mc_offload = { + .type = cpu_to_be16(ETH_P_MPLS_MC), + .callbacks = { + .gso_send_check = mpls_gso_send_check, + .gso_segment = mpls_gso_segment, + }, +}; + +static struct packet_offload mpls_uc_offload = { + .type = cpu_to_be16(ETH_P_MPLS_UC), + .callbacks = { + .gso_send_check = mpls_gso_send_check, + .gso_segment = mpls_gso_segment, + }, +}; + +static int __init mpls_gso_init(void) +{ + pr_info("MPLS GSO support\n"); + + dev_add_offload(&mpls_uc_offload); + dev_add_offload(&mpls_mc_offload); + + return 0; +} + +static void __exit mpls_gso_exit(void) +{ + dev_remove_offload(&mpls_uc_offload); + dev_remove_offload(&mpls_mc_offload); +} + +module_init(mpls_gso_init); +module_exit(mpls_gso_exit); + +MODULE_DESCRIPTION("MPLS GSO support"); +MODULE_AUTHOR("Simon Horman (horms@verge.net.au)"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From da6e378ba918cd0feeb90eeb84d8b42148bb0c82 Mon Sep 17 00:00:00 2001 From: dingtianhong Date: Mon, 27 May 2013 19:53:31 +0000 Subject: netpoll: remove return value from netpoll_rx_disable() The netpoll_rx_disable() will always return 0, it is no use and looks wordy, so remove the unnecessary code and get rid of it in _dev_open and _dev_close. Signed-off-by: Ding Tianhong Signed-off-by: David S. 
Miller --- include/linux/netpoll.h | 4 ++-- net/core/dev.c | 15 ++++----------- net/core/netpoll.c | 3 +-- 3 files changed, 7 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index fa2cb76a7029..f3c7c24bec1c 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -53,10 +53,10 @@ struct netpoll_info { }; #ifdef CONFIG_NETPOLL -extern int netpoll_rx_disable(struct net_device *dev); +extern void netpoll_rx_disable(struct net_device *dev); extern void netpoll_rx_enable(struct net_device *dev); #else -static inline int netpoll_rx_disable(struct net_device *dev) { return 0; } +static inline void netpoll_rx_disable(struct net_device *dev) { return; } static inline void netpoll_rx_enable(struct net_device *dev) { return; } #endif diff --git a/net/core/dev.c b/net/core/dev.c index 2f09cb29cc95..5f747974ac58 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1198,9 +1198,7 @@ static int __dev_open(struct net_device *dev) * If we don't do this there is a chance ndo_poll_controller * or ndo_poll may be running while we open the device */ - ret = netpoll_rx_disable(dev); - if (ret) - return ret; + netpoll_rx_disable(dev); ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); ret = notifier_to_errno(ret); @@ -1309,9 +1307,7 @@ static int __dev_close(struct net_device *dev) LIST_HEAD(single); /* Temporarily disable netpoll until the interface is down */ - retval = netpoll_rx_disable(dev); - if (retval) - return retval; + netpoll_rx_disable(dev); list_add(&dev->unreg_list, &single); retval = __dev_close_many(&single); @@ -1353,14 +1349,11 @@ static int dev_close_many(struct list_head *head) */ int dev_close(struct net_device *dev) { - int ret = 0; if (dev->flags & IFF_UP) { LIST_HEAD(single); /* Block netpoll rx while the interface is going down */ - ret = netpoll_rx_disable(dev); - if (ret) - return ret; + netpoll_rx_disable(dev); list_add(&dev->unreg_list, &single); dev_close_many(&single); @@ -1368,7 +1361,7 @@ int dev_close(struct net_device *dev) netpoll_rx_enable(dev); } - return ret; + return 0; } EXPORT_SYMBOL(dev_close); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index cec074be8c43..37deedd48bcc 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -247,7 +247,7 @@ static void netpoll_poll_dev(struct net_device *dev) zap_completion_queue(); } -int netpoll_rx_disable(struct net_device *dev) +void netpoll_rx_disable(struct net_device *dev) { struct netpoll_info *ni; int idx; @@ -257,7 +257,6 @@ int netpoll_rx_disable(struct net_device *dev) if (ni) down(&ni->dev_lock); srcu_read_unlock(&netpoll_srcu, idx); - return 0; } EXPORT_SYMBOL(netpoll_rx_disable); -- cgit v1.2.3 From 351638e7deeed2ec8ce451b53d33921b3da68f83 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 28 May 2013 01:30:21 +0000 Subject: net: pass info struct via netdevice notifier So far, only net_device * could be passed along with netdevice notifier event. This patch provides a possibility to pass custom structure able to provide info that event listener needs to know. Signed-off-by: Jiri Pirko v2->v3: fix typo on simeth shortened dev_getter shortened notifier_info struct name v1->v2: fix notifier_call parameter in call_netdevice_notifier() Signed-off-by: David S. 
Miller --- arch/ia64/hp/sim/simeth.c | 2 +- arch/mips/txx9/generic/setup_tx4939.c | 3 +- drivers/infiniband/core/cma.c | 4 +- drivers/infiniband/hw/mlx4/main.c | 2 +- drivers/net/bonding/bond_main.c | 2 +- drivers/net/can/led.c | 4 +- drivers/net/ethernet/broadcom/cnic.c | 2 +- drivers/net/ethernet/marvell/skge.c | 2 +- drivers/net/ethernet/marvell/sky2.c | 2 +- .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2 +- drivers/net/ethernet/sfc/efx.c | 2 +- drivers/net/hamradio/bpqether.c | 7 +-- drivers/net/macvlan.c | 2 +- drivers/net/macvtap.c | 2 +- drivers/net/netconsole.c | 5 +- drivers/net/ppp/pppoe.c | 2 +- drivers/net/team/team.c | 2 +- drivers/net/wan/dlci.c | 2 +- drivers/net/wan/hdlc.c | 2 +- drivers/net/wan/lapbether.c | 2 +- drivers/scsi/fcoe/fcoe.c | 2 +- drivers/scsi/fcoe/fcoe_transport.c | 2 +- drivers/staging/csr/netdev.c | 2 +- drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c | 2 +- drivers/staging/ft1000/ft1000-usb/ft1000_proc.c | 2 +- drivers/staging/silicom/bpctl_mod.c | 2 +- include/linux/netdevice.h | 13 +++++ net/8021q/vlan.c | 2 +- net/appletalk/aarp.c | 2 +- net/appletalk/ddp.c | 2 +- net/atm/clip.c | 4 +- net/atm/mpc.c | 6 +-- net/ax25/af_ax25.c | 6 +-- net/batman-adv/hard-interface.c | 2 +- net/bridge/br_notify.c | 2 +- net/caif/caif_dev.c | 4 +- net/caif/caif_usb.c | 4 +- net/can/af_can.c | 4 +- net/can/bcm.c | 4 +- net/can/gw.c | 4 +- net/can/raw.c | 4 +- net/core/dev.c | 56 ++++++++++++++++++---- net/core/drop_monitor.c | 4 +- net/core/dst.c | 2 +- net/core/fib_rules.c | 4 +- net/core/netprio_cgroup.c | 2 +- net/core/pktgen.c | 2 +- net/core/rtnetlink.c | 2 +- net/decnet/af_decnet.c | 4 +- net/ieee802154/6lowpan.c | 5 +- net/ipv4/arp.c | 2 +- net/ipv4/devinet.c | 2 +- net/ipv4/fib_frontend.c | 2 +- net/ipv4/ipmr.c | 2 +- net/ipv4/netfilter/ipt_MASQUERADE.c | 2 +- net/ipv6/addrconf.c | 4 +- net/ipv6/ip6mr.c | 2 +- net/ipv6/ndisc.c | 2 +- net/ipv6/netfilter/ip6t_MASQUERADE.c | 2 +- net/ipv6/route.c | 4 +- net/ipx/af_ipx.c | 2 +- net/iucv/af_iucv.c | 2 +- net/mac80211/iface.c | 5 +- net/netfilter/ipvs/ip_vs_ctl.c | 4 +- net/netfilter/nfnetlink_queue_core.c | 2 +- net/netfilter/xt_TEE.c | 2 +- net/netlabel/netlabel_unlabeled.c | 7 ++- net/netrom/af_netrom.c | 2 +- net/openvswitch/dp_notify.c | 2 +- net/packet/af_packet.c | 5 +- net/phonet/pn_dev.c | 4 +- net/rose/af_rose.c | 6 +-- net/sched/act_mirred.c | 2 +- net/tipc/eth_media.c | 4 +- net/tipc/ib_media.c | 4 +- net/wireless/core.c | 5 +- net/x25/af_x25.c | 2 +- net/xfrm/xfrm_policy.c | 2 +- security/selinux/netif.c | 2 +- 80 files changed, 172 insertions(+), 127 deletions(-) (limited to 'include') diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index c13064e422df..d1b04c4c95e3 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c @@ -268,7 +268,7 @@ static __inline__ int dev_is_ethdev(struct net_device *dev) static int simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct simeth_local *local; struct in_device *in_dev; struct in_ifaddr **ifap = NULL; diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index 729a50991780..b7eccbd17bf7 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -331,7 +331,8 @@ static int tx4939_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct 
net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + if (event == NETDEV_CHANGE && netif_carrier_ok(dev)) { __u64 bit = 0; if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(0)) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 71c2c7116802..34fbc2f60a09 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3269,9 +3269,9 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id } static int cma_netdev_callback(struct notifier_block *self, unsigned long event, - void *ctx) + void *ptr) { - struct net_device *ndev = (struct net_device *)ctx; + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct cma_device *cma_dev; struct rdma_id_private *id_priv; int ret = NOTIFY_DONE; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 23d734349d8e..a188d3178559 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1161,7 +1161,7 @@ static void netdev_removed(struct mlx4_ib_dev *dev, int port) static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct mlx4_ib_dev *ibdev; struct net_device *oldnd; struct mlx4_ib_iboe *iboe; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 29b846cbfb48..f4489d65bf33 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3277,7 +3277,7 @@ static int bond_slave_netdev_event(unsigned long event, static int bond_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *event_dev = (struct net_device *)ptr; + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); pr_debug("event_dev: %s, event: %lx\n", event_dev ? 
event_dev->name : "None", diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c index f27fca65dc4a..a3d99a8fd2d1 100644 --- a/drivers/net/can/led.c +++ b/drivers/net/can/led.c @@ -88,9 +88,9 @@ EXPORT_SYMBOL_GPL(devm_can_led_init); /* NETDEV rename notifier to rename the associated led triggers too */ static int can_led_notifier(struct notifier_block *nb, unsigned long msg, - void *data) + void *ptr) { - struct net_device *netdev = data; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct can_priv *priv = safe_candev_priv(netdev); char name[CAN_LED_NAME_SZ]; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 6b0dc131b20e..d78d4cf140ed 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5622,7 +5622,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, static int cnic_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct cnic_dev *dev; int new_dev = 0; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 171f4b3dda07..c896079728e1 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3706,7 +3706,7 @@ static const struct file_operations skge_debug_fops = { static int skge_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct skge_port *skge; struct dentry *d; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index d175bbd3ffd3..e09a8c6f8536 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4642,7 +4642,7 @@ static const struct file_operations sky2_debug_fops = { static int sky2_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct sky2_port *sky2 = netdev_priv(dev); if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index af951f343ff6..51e13d92761e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -3311,7 +3311,7 @@ static int netxen_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net_device *orig_dev = dev; struct net_device *slave; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index da82f2eb73b4..6bb56d43614b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3530,7 +3530,7 @@ static int qlcnic_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct qlcnic_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); recheck: if (dev == NULL) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 39e4cb39de29..46cc11d5e205 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ 
b/drivers/net/ethernet/sfc/efx.c @@ -2120,7 +2120,7 @@ static void efx_update_name(struct efx_nic *efx) static int efx_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *net_dev = ptr; + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); if (net_dev->netdev_ops == &efx_netdev_ops && event == NETDEV_CHANGENAME) diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 02de6c891670..f91bf0ddf031 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -103,7 +103,7 @@ static struct packet_type bpq_packet_type __read_mostly = { }; static struct notifier_block bpq_dev_notifier = { - .notifier_call =bpq_device_event, + .notifier_call = bpq_device_event, }; @@ -544,9 +544,10 @@ static void bpq_free_device(struct net_device *ndev) /* * Handle device status changes. */ -static int bpq_device_event(struct notifier_block *this,unsigned long event, void *ptr) +static int bpq_device_event(struct notifier_block *this, + unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 1c502bb0c916..edfddc5f61b4 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -921,7 +921,7 @@ static struct rtnl_link_ops macvlan_link_ops = { static int macvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct macvlan_dev *vlan, *next; struct macvlan_port *port; LIST_HEAD(list_kill); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 59e9605de316..68efb91a5633 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1053,7 +1053,7 @@ EXPORT_SYMBOL_GPL(macvtap_get_socket); static int macvtap_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct macvlan_dev *vlan; struct device *classdev; dev_t devt; diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 59ac143dec25..1d1d0a12765c 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -653,12 +653,11 @@ static struct configfs_subsystem netconsole_subsys = { /* Handle network interface device notifications */ static int netconsole_netdev_event(struct notifier_block *this, - unsigned long event, - void *ptr) + unsigned long event, void *ptr) { unsigned long flags; struct netconsole_target *nt; - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); bool stopped = false; if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER || diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index bb07ba94c3aa..5f66e30d9823 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -338,7 +338,7 @@ static void pppoe_flush_dev(struct net_device *dev) static int pppoe_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); /* Only look at sockets that are using this specific device. 
*/ switch (event) { diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 7c43261975bd..9273f48a512b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2647,7 +2647,7 @@ static void team_port_change_check(struct team_port *port, bool linkup) static int team_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *) ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct team_port *port; port = team_port_get_rtnl(dev); diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 147614ed86aa..70ac59929f80 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -477,7 +477,7 @@ static void dlci_setup(struct net_device *dev) static int dlci_dev_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *) ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev_net(dev) != &init_net) return NOTIFY_DONE; diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index a0a932c63d0a..9c33ca918e19 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -99,7 +99,7 @@ static inline void hdlc_proto_stop(struct net_device *dev) static int hdlc_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); hdlc_device *hdlc; unsigned long flags; int on; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index a73b49eb87e3..a33a46fa88dd 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -370,7 +370,7 @@ static int lapbeth_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct lapbethdev *lapbeth; - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev_net(dev) != &init_net) return NOTIFY_DONE; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 292b24f9bf93..ee721b6cbcdf 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1975,7 +1975,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, { struct fcoe_ctlr_device *cdev; struct fc_lport *lport = NULL; - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct fcoe_ctlr *ctlr; struct fcoe_interface *fcoe; struct fcoe_port *port; diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index f3a5a53e8631..01adbe0ec53b 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -704,7 +704,7 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer) static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr) { - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_UNREGISTER: diff --git a/drivers/staging/csr/netdev.c b/drivers/staging/csr/netdev.c index a0177d998978..d49cdf84a496 100644 --- a/drivers/staging/csr/netdev.c +++ b/drivers/staging/csr/netdev.c @@ -2891,7 +2891,7 @@ void uf_net_get_name(struct net_device *dev, char *name, int len) */ static int uf_netdev_event(struct notifier_block *notif, unsigned long event, void* ptr) { - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(netdev); unifi_priv_t *priv = 
NULL; static const CsrWifiMacAddress broadcast_address = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}}; diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c index 94e426e4d98b..b2330f1df7e7 100644 --- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c +++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c @@ -164,7 +164,7 @@ static const struct file_operations ft1000_proc_fops = { static int ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ft1000_info *info; info = netdev_priv(dev); diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c index eca6f0292b4b..5ead942be680 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c @@ -166,7 +166,7 @@ static const struct file_operations ft1000_proc_fops = { static int ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ft1000_info *info; struct proc_dir_entry *ft1000_proc_file; diff --git a/drivers/staging/silicom/bpctl_mod.c b/drivers/staging/silicom/bpctl_mod.c index b7e570ccb759..c8ddb99e8526 100644 --- a/drivers/staging/silicom/bpctl_mod.c +++ b/drivers/staging/silicom/bpctl_mod.c @@ -133,7 +133,7 @@ static unsigned long str_to_hex(char *p); static int bp_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); static bpctl_dev_t *pbpctl_dev = NULL, *pbpctl_dev_m = NULL; int dev_num = 0, ret = 0, ret_d = 0, time_left = 0; /* printk("BP_PROC_SUPPORT event =%d %s %d\n", event,dev->name, dev->ifindex ); */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6b2bb460d1d7..13a34848b5e1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1599,6 +1599,19 @@ struct packet_offload { extern int register_netdevice_notifier(struct notifier_block *nb); extern int unregister_netdevice_notifier(struct notifier_block *nb); + +struct netdev_notifier_info { + struct net_device *dev; +}; + +static inline struct net_device * +netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) +{ + return info->dev; +} + +extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, + struct netdev_notifier_info *info); extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 9424f3718ea7..2fb2d88e8c2e 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -341,7 +341,7 @@ static void __vlan_device_event(struct net_device *dev, unsigned long event) static int vlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vlan_group *grp; struct vlan_info *vlan_info; int i, flgs; diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index 173a2e82f486..690356fa52b9 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c @@ -332,7 +332,7 @@ static void aarp_expire_timeout(unsigned long unused) static int aarp_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = 
netdev_notifier_info_to_dev(ptr); int ct; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index ef12839a7cfe..7fee50d637f9 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -644,7 +644,7 @@ static inline void atalk_dev_down(struct net_device *dev) static int ddp_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/atm/clip.c b/net/atm/clip.c index 8ae3a7879335..cce241eb01d9 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -539,9 +539,9 @@ static int clip_create(int number) } static int clip_device_event(struct notifier_block *this, unsigned long event, - void *arg) + void *ptr) { - struct net_device *dev = arg; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/atm/mpc.c b/net/atm/mpc.c index d4cc1be5c364..3af12755cd04 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -998,14 +998,12 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc) } static int mpoa_event_listener(struct notifier_block *mpoa_notifier, - unsigned long event, void *dev_ptr) + unsigned long event, void *ptr) { - struct net_device *dev; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct mpoa_client *mpc; struct lec_priv *priv; - dev = dev_ptr; - if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index e277e38f736b..4b4d2b779ec1 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -111,9 +111,9 @@ again: * Handle device status changes. */ static int ax25_device_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; @@ -1974,7 +1974,7 @@ static struct packet_type ax25_packet_type __read_mostly = { }; static struct notifier_block ax25_dev_notifier = { - .notifier_call =ax25_device_event, + .notifier_call = ax25_device_event, }; static int __init ax25_init(void) diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 522243aff2f3..b6504eac0ed8 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -595,7 +595,7 @@ void batadv_hardif_remove_interfaces(void) static int batadv_hard_if_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *net_dev = ptr; + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); struct batadv_hard_iface *hard_iface; struct batadv_hard_iface *primary_if = NULL; struct batadv_priv *bat_priv; diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 1644b3e1f947..3a3f371b2841 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c @@ -31,7 +31,7 @@ struct notifier_block br_device_notifier = { */ static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net_bridge_port *p; struct net_bridge *br; bool changed_addr; diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 1f9ece1a9c34..4dca159435cf 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -352,9 +352,9 @@ EXPORT_SYMBOL(caif_enroll_dev); /* notify Caif of 
device events */ static int caif_device_notify(struct notifier_block *me, unsigned long what, - void *arg) + void *ptr) { - struct net_device *dev = arg; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct caif_device_entry *caifd = NULL; struct caif_dev_common *caifdev; struct cfcnfg *cfg; diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index 942e00a425fd..75ed04b78fa4 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c @@ -121,9 +121,9 @@ static struct packet_type caif_usb_type __read_mostly = { }; static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, - void *arg) + void *ptr) { - struct net_device *dev = arg; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct caif_dev_common common; struct cflayer *layer, *link_support; struct usbnet *usbnet; diff --git a/net/can/af_can.c b/net/can/af_can.c index c4e50852c9f4..3ab8dd2e1282 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -794,9 +794,9 @@ EXPORT_SYMBOL(can_proto_unregister); * af_can notifier to create/remove CAN netdevice specific structs */ static int can_notifier(struct notifier_block *nb, unsigned long msg, - void *data) + void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct dev_rcv_lists *d; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/can/bcm.c b/net/can/bcm.c index 8f113e6ff327..46f20bfafc0e 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1350,9 +1350,9 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock, * notification handler for netdevice status changes */ static int bcm_notifier(struct notifier_block *nb, unsigned long msg, - void *data) + void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier); struct sock *sk = &bo->sk; struct bcm_op *op; diff --git a/net/can/gw.c b/net/can/gw.c index 3ee690e8c7d3..2f291f961a17 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -445,9 +445,9 @@ static inline void cgw_unregister_filter(struct cgw_job *gwj) } static int cgw_notifier(struct notifier_block *nb, - unsigned long msg, void *data) + unsigned long msg, void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/can/raw.c b/net/can/raw.c index 1085e65f848e..641e1c895123 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -239,9 +239,9 @@ static int raw_enable_allfilters(struct net_device *dev, struct sock *sk) } static int raw_notifier(struct notifier_block *nb, - unsigned long msg, void *data) + unsigned long msg, void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct raw_sock *ro = container_of(nb, struct raw_sock, notifier); struct sock *sk = &ro->sk; diff --git a/net/core/dev.c b/net/core/dev.c index 5f747974ac58..54fce6006a83 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1391,6 +1391,20 @@ void dev_disable_lro(struct net_device *dev) } EXPORT_SYMBOL(dev_disable_lro); +static void netdev_notifier_info_init(struct netdev_notifier_info *info, + struct net_device *dev) +{ + info->dev = dev; +} + +static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, + struct net_device *dev) +{ + struct netdev_notifier_info info; + + netdev_notifier_info_init(&info, dev); + return 
nb->notifier_call(nb, val, &info); +} static int dev_boot_phase = 1; @@ -1423,7 +1437,7 @@ int register_netdevice_notifier(struct notifier_block *nb) goto unlock; for_each_net(net) { for_each_netdev(net, dev) { - err = nb->notifier_call(nb, NETDEV_REGISTER, dev); + err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); err = notifier_to_errno(err); if (err) goto rollback; @@ -1431,7 +1445,7 @@ int register_netdevice_notifier(struct notifier_block *nb) if (!(dev->flags & IFF_UP)) continue; - nb->notifier_call(nb, NETDEV_UP, dev); + call_netdevice_notifier(nb, NETDEV_UP, dev); } } @@ -1447,10 +1461,11 @@ rollback: goto outroll; if (dev->flags & IFF_UP) { - nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); - nb->notifier_call(nb, NETDEV_DOWN, dev); + call_netdevice_notifier(nb, NETDEV_GOING_DOWN, + dev); + call_netdevice_notifier(nb, NETDEV_DOWN, dev); } - nb->notifier_call(nb, NETDEV_UNREGISTER, dev); + call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); } } @@ -1488,10 +1503,11 @@ int unregister_netdevice_notifier(struct notifier_block *nb) for_each_net(net) { for_each_netdev(net, dev) { if (dev->flags & IFF_UP) { - nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); - nb->notifier_call(nb, NETDEV_DOWN, dev); + call_netdevice_notifier(nb, NETDEV_GOING_DOWN, + dev); + call_netdevice_notifier(nb, NETDEV_DOWN, dev); } - nb->notifier_call(nb, NETDEV_UNREGISTER, dev); + call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); } } unlock: @@ -1500,6 +1516,25 @@ unlock: } EXPORT_SYMBOL(unregister_netdevice_notifier); +/** + * call_netdevice_notifiers_info - call all network notifier blocks + * @val: value passed unmodified to notifier function + * @dev: net_device pointer passed unmodified to notifier function + * @info: notifier information data + * + * Call all network notifier blocks. Parameters and return value + * are as for raw_notifier_call_chain(). 
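/*
 * For illustration only (hypothetical listener, not part of this patch):
 * under the new convention a notifier callback no longer casts ptr to
 * struct net_device * directly, it recovers the device through the
 * netdev_notifier_info_to_dev() accessor.
 */
#include <linux/netdevice.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};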
+ */ + +int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, + struct netdev_notifier_info *info) +{ + ASSERT_RTNL(); + netdev_notifier_info_init(info, dev); + return raw_notifier_call_chain(&netdev_chain, val, info); +} +EXPORT_SYMBOL(call_netdevice_notifiers_info); + /** * call_netdevice_notifiers - call all network notifier blocks * @val: value passed unmodified to notifier function @@ -1511,8 +1546,9 @@ EXPORT_SYMBOL(unregister_netdevice_notifier); int call_netdevice_notifiers(unsigned long val, struct net_device *dev) { - ASSERT_RTNL(); - return raw_notifier_call_chain(&netdev_chain, val, dev); + struct netdev_notifier_info info; + + return call_netdevice_notifiers_info(val, dev, &info); } EXPORT_SYMBOL(call_netdevice_notifiers); diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index d23b6682f4e9..5e78d44333b9 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -295,9 +295,9 @@ static int net_dm_cmd_trace(struct sk_buff *skb, } static int dropmon_net_event(struct notifier_block *ev_block, - unsigned long event, void *ptr) + unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct dm_hw_stat_delta *new_stat = NULL; struct dm_hw_stat_delta *tmp; diff --git a/net/core/dst.c b/net/core/dst.c index df9cc810ec8e..ca4231ec7347 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -372,7 +372,7 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev, static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct dst_entry *dst, *last = NULL; switch (event) { diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index d5a9f8ead0d8..21735440c44a 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -705,9 +705,9 @@ static void detach_rules(struct list_head *rules, struct net_device *dev) static int fib_rules_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct fib_rules_ops *ops; diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 0777d0aa18c3..e533259dce3c 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -261,7 +261,7 @@ struct cgroup_subsys net_prio_subsys = { static int netprio_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netprio_map *old; /* diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 11f2704c3810..795498fd4587 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1921,7 +1921,7 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d static int pktgen_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id); if (pn->pktgen_exiting) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a08bd2b7fe3f..49c14451d8ab 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2667,7 +2667,7 @@ static void rtnetlink_rcv(struct sk_buff *skb) static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device 
*dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_UP: diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index c21f200eed93..dd4d506ef923 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -2078,9 +2078,9 @@ out_err: } static int dn_device_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 55e1fd5b3e56..3b9d5f20bd1c 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -1352,10 +1352,9 @@ static inline void lowpan_netlink_fini(void) } static int lowpan_device_event(struct notifier_block *unused, - unsigned long event, - void *ptr) + unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); LIST_HEAD(del_list); struct lowpan_dev_record *entry, *tmp; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 247ec1951c35..bf574029a183 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1234,7 +1234,7 @@ out: static int arp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_CHANGEADDR: diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index dfc39d4d48b7..b047e2d8a614 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1333,7 +1333,7 @@ static void inetdev_send_gratuitous_arp(struct net_device *dev, static int inetdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct in_device *in_dev = __in_dev_get_rtnl(dev); ASSERT_RTNL(); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index c7629a209f9d..05a4888dede9 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1038,7 +1038,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct in_device *in_dev; struct net *net = dev_net(dev); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9d9610ae7855..f975399f3522 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1609,7 +1609,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct mr_table *mrt; struct vif_device *v; diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 5d5d4d1be9c2..dd5508bde799 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -108,7 +108,7 @@ static int masq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - const struct net_device *dev = ptr; + const struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_DOWN) { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 432e084b6b62..bce073b4bbd4 100644 --- 
a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2826,9 +2826,9 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) } static int addrconf_notify(struct notifier_block *this, unsigned long event, - void *data) + void *ptr) { - struct net_device *dev = (struct net_device *) data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct inet6_dev *idev = __in6_dev_get(dev); int run_pending = 0; int err; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 241fb8ad9fcf..583e8d435f9a 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1319,7 +1319,7 @@ static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc, static int ip6mr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct mr6_table *mrt; struct mif_device *v; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 2712ab22a174..a0962697a257 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1568,7 +1568,7 @@ int ndisc_rcv(struct sk_buff *skb) static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct inet6_dev *idev; diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c index 60e9053bab05..b76257cd7e1e 100644 --- a/net/ipv6/netfilter/ip6t_MASQUERADE.c +++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c @@ -71,7 +71,7 @@ static int device_cmp(struct nf_conn *ct, void *ifindex) static int masq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - const struct net_device *dev = ptr; + const struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_DOWN) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index ad0aa6b0b86a..194c3cde1536 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2681,9 +2681,9 @@ errout: } static int ip6_route_dev_notify(struct notifier_block *this, - unsigned long event, void *data) + unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index f547a47d381c..7a1e0fc1bd4d 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -330,7 +330,7 @@ static __inline__ void __ipxitf_put(struct ipx_interface *intrfc) static int ipxitf_device_event(struct notifier_block *notifier, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ipx_interface *i, *tmp; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index ae691651b721..168aff5e60de 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -2293,7 +2293,7 @@ out_unlock: static int afiucv_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *event_dev = (struct net_device *)ptr; + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); struct sock *sk; struct iucv_sock *iucv; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 60f1ce5e5e52..d2c3fd178dbe 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1717,10 +1717,9 @@ void ieee80211_remove_interfaces(struct 
ieee80211_local *local) } static int netdev_notify(struct notifier_block *nb, - unsigned long state, - void *ndev) + unsigned long state, void *ptr) { - struct net_device *dev = ndev; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ieee80211_sub_if_data *sdata; if (state != NETDEV_CHANGENAME) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 5b142fb16480..7c3ed429789e 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1487,9 +1487,9 @@ ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev) * Currently only NETDEV_DOWN is handled to release refs to cached dsts */ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_service *svc; diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 4e27fa035814..0f2ac8f2e7b7 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -800,7 +800,7 @@ static int nfqnl_rcv_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); /* Drop any packets associated with the downed device */ if (event == NETDEV_DOWN) diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c index bd93e51d30ac..292934d23482 100644 --- a/net/netfilter/xt_TEE.c +++ b/net/netfilter/xt_TEE.c @@ -200,7 +200,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) static int tee_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct xt_tee_priv *priv; priv = container_of(this, struct xt_tee_priv, notifier); diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 8a6c6ea466d8..af3531926ee0 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -708,7 +708,7 @@ unlhsh_remove_return: * netlbl_unlhsh_netdev_handler - Network device notification handler * @this: notifier block * @event: the event - * @ptr: the network device (cast to void) + * @ptr: the netdevice notifier info (cast to void) * * Description: * Handle network device events, although at present all we care about is a @@ -717,10 +717,9 @@ unlhsh_remove_return: * */ static int netlbl_unlhsh_netdev_handler(struct notifier_block *this, - unsigned long event, - void *ptr) + unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netlbl_unlhsh_iface *iface = NULL; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index ec0c80fde69f..698814bfa7ad 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -117,7 +117,7 @@ static void nr_kill_by_device(struct net_device *dev) */ static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c index ef4feec6cd84..c3235675f359 100644 --- a/net/openvswitch/dp_notify.c +++ 
b/net/openvswitch/dp_notify.c @@ -78,7 +78,7 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct ovs_net *ovs_net; - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vport *vport = NULL; if (!ovs_is_internal_dev(dev)) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8ec1bca7f859..79fe63246b27 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3331,10 +3331,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, } -static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) +static int packet_notifier(struct notifier_block *this, + unsigned long msg, void *ptr) { struct sock *sk; - struct net_device *dev = data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); rcu_read_lock(); diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 45a7df6575de..56a6146ac94b 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -292,9 +292,9 @@ static void phonet_route_autodel(struct net_device *dev) /* notify Phonet of device events */ static int phonet_device_notify(struct notifier_block *me, unsigned long what, - void *arg) + void *ptr) { - struct net_device *dev = arg; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (what) { case NETDEV_REGISTER: diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 9c8347451597..e98fcfbe6007 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -202,10 +202,10 @@ static void rose_kill_by_device(struct net_device *dev) /* * Handle device status changes. */ -static int rose_device_event(struct notifier_block *this, unsigned long event, - void *ptr) +static int rose_device_event(struct notifier_block *this, + unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 5d676edc22a6..977c10e0631b 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -243,7 +243,7 @@ nla_put_failure: static int mirred_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct tcf_mirred *m; if (event == NETDEV_UNREGISTER) diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 120a676a3360..fc60bea63169 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -251,9 +251,9 @@ static void disable_bearer(struct tipc_bearer *tb_ptr) * specified device. */ static int recv_notification(struct notifier_block *nb, unsigned long evt, - void *dv) + void *ptr) { - struct net_device *dev = (struct net_device *)dv; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct eth_bearer *eb_ptr = &eth_bearers[0]; struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS]; diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c index 2a2864c25e15..baa9df4327d9 100644 --- a/net/tipc/ib_media.c +++ b/net/tipc/ib_media.c @@ -244,9 +244,9 @@ static void disable_bearer(struct tipc_bearer *tb_ptr) * specified device. 
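/*
 * Illustration only (not code from any patch in this series): the
 * conversions above only touch the callback bodies; registration is
 * unchanged.  Note that register_netdevice_notifier() -- see the
 * net/core/dev.c hunks earlier -- replays NETDEV_REGISTER and NETDEV_UP
 * for devices that already exist, so a freshly registered listener such
 * as the hypothetical example_notifier sketched above must be prepared
 * to see those events immediately.  Typical module wiring:
 */
#include <linux/module.h>
#include <linux/netdevice.h>

static int __init example_init(void)
{
	return register_netdevice_notifier(&example_notifier);
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier(&example_notifier);
}

module_init(example_init);
module_exit(example_exit);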
*/ static int recv_notification(struct notifier_block *nb, unsigned long evt, - void *dv) + void *ptr) { - struct net_device *dev = (struct net_device *)dv; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ib_bearer *ib_ptr = &ib_bearers[0]; struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS]; diff --git a/net/wireless/core.c b/net/wireless/core.c index 73405e00c800..01e41191f1bf 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -886,10 +886,9 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, } static int cfg80211_netdev_notifier_call(struct notifier_block *nb, - unsigned long state, - void *ndev) + unsigned long state, void *ptr) { - struct net_device *dev = ndev; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; int ret; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 37ca9694aabe..1d964e23853f 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -224,7 +224,7 @@ static void x25_kill_by_device(struct net_device *dev) static int x25_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct x25_neigh *nb; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 23cea0f74336..536ccc95de89 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2784,7 +2784,7 @@ static void __net_init xfrm_dst_ops_init(struct net *net) static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_DOWN: diff --git a/security/selinux/netif.c b/security/selinux/netif.c index 47a49d1a6f6a..694e9e43855f 100644 --- a/security/selinux/netif.c +++ b/security/selinux/netif.c @@ -264,7 +264,7 @@ static int sel_netif_avc_callback(u32 event) static int sel_netif_netdev_notifier_handler(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev_net(dev) != &init_net) return NOTIFY_DONE; -- cgit v1.2.3 From be9efd3653284f2827fd82861e8e9db9a8f726e1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 28 May 2013 01:30:22 +0000 Subject: net: pass changed flags along with NETDEV_CHANGE event MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use new netdevice notifier infrastructure to pass along changed flags. Signed-off-by: Timo Teräs Signed-off-by: Jiri Pirko v2->v3: shortened notifier_info struct name Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 5 +++++ net/core/dev.c | 9 +++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 13a34848b5e1..850271809a9e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1604,6 +1604,11 @@ struct netdev_notifier_info { struct net_device *dev; }; +struct netdev_notifier_change_info { + struct netdev_notifier_info info; /* must be first */ + unsigned int flags_changed; +}; + static inline struct net_device * netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) { diff --git a/net/core/dev.c b/net/core/dev.c index 54fce6006a83..6eb621cc3b81 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4771,8 +4771,13 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) } if (dev->flags & IFF_UP && - (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) - call_netdevice_notifiers(NETDEV_CHANGE, dev); + (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { + struct netdev_notifier_change_info change_info; + + change_info.flags_changed = changes; + call_netdevice_notifiers_info(NETDEV_CHANGE, dev, + &change_info.info); + } } /** -- cgit v1.2.3 From 75538c2b85cf22eb9af6adfaf26ed7219025adeb Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 29 May 2013 11:30:50 +0800 Subject: net: always pass struct netdev_notifier_info to netdevice notifiers commit 351638e7deeed2ec8ce451b53d3 (net: pass info struct via netdevice notifier) breaks booting of my KVM guest, this is due to we still forget to pass struct netdev_notifier_info in several places. This patch completes it. Cc: Jiri Pirko Cc: David S. Miller Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- include/linux/netdevice.h | 6 ++++++ net/atm/clip.c | 4 +++- net/core/dev.c | 6 ------ net/ipv4/netfilter/ipt_MASQUERADE.c | 5 ++++- net/ipv6/addrconf.c | 7 +++++-- net/ipv6/netfilter/ip6t_MASQUERADE.c | 4 +++- 6 files changed, 21 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 850271809a9e..8f967e34142b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1609,6 +1609,12 @@ struct netdev_notifier_change_info { unsigned int flags_changed; }; +static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, + struct net_device *dev) +{ + info->dev = dev; +} + static inline struct net_device * netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) { diff --git a/net/atm/clip.c b/net/atm/clip.c index cce241eb01d9..8215f7cb170b 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -575,6 +575,7 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event, void *ifa) { struct in_device *in_dev; + struct netdev_notifier_info info; in_dev = ((struct in_ifaddr *)ifa)->ifa_dev; /* @@ -583,7 +584,8 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event, */ if (event != NETDEV_UP) return NOTIFY_DONE; - return clip_device_event(this, NETDEV_CHANGE, in_dev->dev); + netdev_notifier_info_init(&info, in_dev->dev); + return clip_device_event(this, NETDEV_CHANGE, &info); } static struct notifier_block clip_dev_notifier = { diff --git a/net/core/dev.c b/net/core/dev.c index 6eb621cc3b81..b2e9057be3bf 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1391,12 +1391,6 @@ void dev_disable_lro(struct net_device *dev) } EXPORT_SYMBOL(dev_disable_lro); -static void 
netdev_notifier_info_init(struct netdev_notifier_info *info, - struct net_device *dev) -{ - info->dev = dev; -} - static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, struct net_device *dev) { diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index dd5508bde799..30e4de940567 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -129,7 +129,10 @@ static int masq_inet_event(struct notifier_block *this, void *ptr) { struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; - return masq_device_event(this, event, dev); + struct netdev_notifier_info info; + + netdev_notifier_info_init(&info, dev); + return masq_device_event(this, event, &info); } static struct notifier_block masq_dev_notifier = { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index bce073b4bbd4..7b34f06af344 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4645,13 +4645,16 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, static void dev_disable_change(struct inet6_dev *idev) { + struct netdev_notifier_info info; + if (!idev || !idev->dev) return; + netdev_notifier_info_init(&info, idev->dev); if (idev->cnf.disable_ipv6) - addrconf_notify(NULL, NETDEV_DOWN, idev->dev); + addrconf_notify(NULL, NETDEV_DOWN, &info); else - addrconf_notify(NULL, NETDEV_UP, idev->dev); + addrconf_notify(NULL, NETDEV_UP, &info); } static void addrconf_disable_change(struct net *net, __s32 newf) diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c index b76257cd7e1e..47bff6107519 100644 --- a/net/ipv6/netfilter/ip6t_MASQUERADE.c +++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c @@ -89,8 +89,10 @@ static int masq_inet_event(struct notifier_block *this, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = ptr; + struct netdev_notifier_info info; - return masq_device_event(this, event, ifa->idev->dev); + netdev_notifier_info_init(&info, ifa->idev->dev); + return masq_device_event(this, event, &info); } static struct notifier_block masq_inet_notifier = { -- cgit v1.2.3 From ced14f6804a979d1972415bc23f2f8ddb18595dd Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 28 May 2013 20:34:25 +0000 Subject: net: Correct comparisons and calculations using skb->tail and skb-transport_header This corrects an regression introduced by "net: Use 16bits for *_headers fields of struct skbuff" when NET_SKBUFF_DATA_USES_OFFSET is not set. In that case skb->tail will be a pointer whereas skb->transport_header will be an offset from head. This is corrected by using wrappers that ensure that comparisons and calculations are always made using pointers. Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- include/net/inet_ecn.h | 6 ++++-- net/core/dev.c | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index aab73757bc4d..3bd22795c3e2 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -134,12 +134,14 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) { switch (skb->protocol) { case cpu_to_be16(ETH_P_IP): - if (skb->network_header + sizeof(struct iphdr) <= skb->tail) + if (skb_network_header(skb) + sizeof(struct iphdr) <= + skb_tail_pointer(skb)) return IP_ECN_set_ce(ip_hdr(skb)); break; case cpu_to_be16(ETH_P_IPV6): - if (skb->network_header + sizeof(struct ipv6hdr) <= skb->tail) + if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= + skb_tail_pointer(skb)) return IP6_ECN_set_ce(ipv6_hdr(skb)); break; } diff --git a/net/core/dev.c b/net/core/dev.c index b2e9057be3bf..d4d874a25e45 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1724,7 +1724,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) skb_reset_mac_header(skb2); if (skb_network_header(skb2) < skb2->data || - skb2->network_header > skb2->tail) { + skb_network_header(skb2) > skb_tail_pointer(skb2)) { net_crit_ratelimited("protocol %04x is buggy, dev %s\n", ntohs(skb2->protocol), dev->name); @@ -3892,7 +3892,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb) NAPI_GRO_CB(skb)->frag0 = NULL; NAPI_GRO_CB(skb)->frag0_len = 0; - if (skb->mac_header == skb->tail && + if (skb_mac_header(skb) == skb_tail_pointer(skb) && pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0))) { NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); -- cgit v1.2.3 From 7cc461900549fc480eb133948649a1edb7eaaa6f Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 28 May 2013 20:34:29 +0000 Subject: net, ipv4, ipv6: Correct assignment of skb->network_header to skb->tail This corrects a regression introduced by "net: Use 16bits for *_headers fields of struct skbuff" when NET_SKBUFF_DATA_USES_OFFSET is not set. In that case skb->tail will be a pointer, whereas skb->network_header is now an offset. This patch corrects the problem by adding a wrapper to return the skb tail as an offset regardless of the value of NET_SKBUFF_DATA_USES_OFFSET. It is possible for this offset to be larger than 64k, and some care has been taken to treat such cases as an error. Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 9 +++++++++ net/core/netpoll.c | 9 ++++++++- net/core/pktgen.c | 16 ++++++++++++++-- net/ipv4/ipmr.c | 8 +++++++- 4 files changed, 38 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8f2b830772a8..5f931191cf57 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1391,6 +1391,11 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) skb_reset_tail_pointer(skb); skb->tail += offset; } + +static inline unsigned long skb_tail_offset(const struct sk_buff *skb) +{ + return skb->tail; +} #else /* NET_SKBUFF_DATA_USES_OFFSET */ static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) { @@ -1407,6 +1412,10 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) skb->tail = skb->data + offset; } +static inline unsigned long skb_tail_offset(const struct sk_buff *skb) +{ + return skb->tail - skb->head; +} #endif /* NET_SKBUFF_DATA_USES_OFFSET */ /* diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 37deedd48bcc..688517c7ff17 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -676,6 +676,8 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo spin_lock_irqsave(&npinfo->rx_lock, flags); list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { + unsigned long tail_offset; + if (!ipv6_addr_equal(daddr, &np->local_ip.in6)) continue; @@ -700,7 +702,12 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo hdr->saddr = *saddr; hdr->daddr = *daddr; - send_skb->transport_header = send_skb->tail; + tail_offset = skb_tail_offset(skb); + if (tail_offset > 0xffff) { + kfree_skb(send_skb); + continue; + } + skb_set_network_header(send_skb, tail_offset); skb_put(send_skb, size); icmp6h = (struct icmp6hdr *)skb_transport_header(skb); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 795498fd4587..d2ede89662be 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2642,6 +2642,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ u16 queue_map; + unsigned long tail_offset; if (pkt_dev->nr_labels) protocol = htons(ETH_P_MPLS_UC); @@ -2708,7 +2709,12 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, *vlan_encapsulated_proto = htons(ETH_P_IP); } - skb->network_header = skb->tail; + tail_offset = skb_tail_offset(skb); + if (tail_offset > 0xffff) { + kfree_skb(skb); + return NULL; + } + skb_set_network_header(skb, tail_offset); skb->transport_header = skb->network_header + sizeof(struct iphdr); skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); skb_set_queue_mapping(skb, queue_map); @@ -2775,6 +2781,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ u16 queue_map; + unsigned long tail_offset; if (pkt_dev->nr_labels) protocol = htons(ETH_P_MPLS_UC); @@ -2822,7 +2829,12 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, *vlan_encapsulated_proto = htons(ETH_P_IPV6); } - skb->network_header = skb->tail; + tail_offset = skb_tail_offset(skb); + if (tail_offset > 0xffff) { + kfree_skb(skb); + return NULL; + } + skb_set_network_header(skb, tail_offset); 
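/*
 * Sketch only (hypothetical helper, not taken from this series): with
 * NET_SKBUFF_DATA_USES_OFFSET unset, skb->tail is a pointer, so code that
 * needs "tail as an offset" has to go through skb_tail_offset() instead of
 * mixing the two representations, and must reject offsets that do not fit
 * in the 16-bit header fields, exactly as the hunks here do.
 */
#include <linux/skbuff.h>

static int example_set_network_header_to_tail(struct sk_buff *skb)
{
	unsigned long tail_offset = skb_tail_offset(skb);

	if (tail_offset > 0xffff)
		return -EINVAL;	/* would not fit in skb->network_header */

	skb_set_network_header(skb, tail_offset);
	return 0;
}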
skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); skb_set_queue_mapping(skb, queue_map); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index f975399f3522..df97f0ac1a1c 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -945,6 +945,7 @@ static int ipmr_cache_report(struct mr_table *mrt, struct igmpmsg *msg; struct sock *mroute_sk; int ret; + unsigned long tail_offset; #ifdef CONFIG_IP_PIMSM if (assert == IGMPMSG_WHOLEPKT) @@ -980,7 +981,12 @@ static int ipmr_cache_report(struct mr_table *mrt, /* Copy the IP header */ - skb->network_header = skb->tail; + tail_offset = skb_tail_offset(skb); + if (tail_offset > 0xffff) { + kfree_skb(skb); + return -EINVAL; + } + skb_set_network_header(skb, tail_offset); skb_put(skb, ihl); skb_copy_to_linear_data(skb, pkt->data, ihl); ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ -- cgit v1.2.3 From e057d3c31bdf87616b415c4b2cbf7310f54b9219 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 28 May 2013 13:01:52 +0200 Subject: cfg80211: support an active monitor interface flag An active monitor interface is one that is used for communication (via injection). It is expected to ACK incoming unicast packets. This is useful for running various 802.11 testing utilities that associate to an AP via injection and manage the state in user space. Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ include/uapi/linux/nl80211.h | 4 ++++ net/wireless/nl80211.c | 10 ++++++++++ 3 files changed, 16 insertions(+) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index b3b076a46d50..13b247d26544 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -961,6 +961,7 @@ struct station_info { * @MONITOR_FLAG_CONTROL: pass control frames * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering * @MONITOR_FLAG_COOK_FRAMES: report frames after processing + * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address */ enum monitor_flags { MONITOR_FLAG_FCSFAIL = 1<wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) + return -EOPNOTSUPP; + if (change) err = cfg80211_change_iface(rdev, dev, ntype, flags, ¶ms); else @@ -2395,6 +2400,11 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, &flags); + + if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) && + !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) + return -EOPNOTSUPP; + wdev = rdev_add_virtual_intf(rdev, nla_data(info->attrs[NL80211_ATTR_IFNAME]), type, err ? NULL : &flags, ¶ms); -- cgit v1.2.3 From bd5e14fb77d9d1dd15f9102759e8c8b31c667488 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 29 May 2013 09:08:05 +0200 Subject: cfg80211: remove cleanup_work kernel-doc I evidently forgot this when removing the work itself. 
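Note on the active monitor flag introduced above (commit "cfg80211: support an active monitor interface flag"): the feature is opt-in, and the nl80211 checks only allow MONITOR_FLAG_ACTIVE when the wiphy advertises NL80211_FEATURE_ACTIVE_MONITOR. The following is a minimal illustrative sketch of how a low-level driver might opt in; the helper name and setup path are hypothetical and not part of this patch series.

#include <net/cfg80211.h>

/* Illustrative sketch only: advertise active-monitor support from a
 * driver's wiphy setup path so nl80211 accepts NL80211_MNTR_FLAG_ACTIVE.
 * example_setup_wiphy() is a hypothetical helper, not part of this patch.
 */
void example_setup_wiphy(struct wiphy *wiphy)
{
	/* Without this feature bit, the checks added in nl80211.c return
	 * -EOPNOTSUPP for monitor interfaces requested with the ACTIVE flag.
	 */
	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
}

From user space, recent versions of iw can request such an interface with a command along the lines of "iw phy phy0 interface add mon0 type monitor flags active"; the exact syntax depends on the iw version.
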
Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 13b247d26544..6dd19593e333 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2869,7 +2869,6 @@ struct cfg80211_cached_keys; * @mgmt_registrations_lock: lock for the list * @mtx: mutex used to lock data in this struct, may be used by drivers * and some API functions require it held - * @cleanup_work: work struct used for cleanup that can't be done directly * @beacon_interval: beacon interval used on this device for transmitting * beacons, 0 when not valid * @address: The address for this device, valid only if @netdev is %NULL -- cgit v1.2.3 From a910e4a94f6923c8c988565525f017f687bf7205 Mon Sep 17 00:00:00 2001 From: Solomon Peachy Date: Fri, 24 May 2013 20:04:38 -0400 Subject: cw1200: add driver for the ST-E CW1100 & CW1200 WLAN chipsets Signed-off-by: Solomon Peachy Signed-off-by: John W. Linville --- MAINTAINERS | 5 + drivers/net/wireless/Kconfig | 1 + drivers/net/wireless/Makefile | 2 + drivers/net/wireless/cw1200/Kconfig | 46 + drivers/net/wireless/cw1200/Makefile | 24 + drivers/net/wireless/cw1200/bh.c | 616 +++++++ drivers/net/wireless/cw1200/bh.h | 28 + drivers/net/wireless/cw1200/cw1200.h | 332 ++++ drivers/net/wireless/cw1200/cw1200_sagrad.c | 145 ++ drivers/net/wireless/cw1200/cw1200_sdio.c | 409 +++++ drivers/net/wireless/cw1200/cw1200_spi.c | 480 ++++++ drivers/net/wireless/cw1200/debug.c | 664 ++++++++ drivers/net/wireless/cw1200/debug.h | 98 ++ drivers/net/wireless/cw1200/fwio.c | 525 ++++++ drivers/net/wireless/cw1200/fwio.h | 39 + drivers/net/wireless/cw1200/hwio.c | 311 ++++ drivers/net/wireless/cw1200/hwio.h | 247 +++ drivers/net/wireless/cw1200/itp.c | 730 ++++++++ drivers/net/wireless/cw1200/itp.h | 144 ++ drivers/net/wireless/cw1200/main.c | 618 +++++++ drivers/net/wireless/cw1200/pm.c | 367 ++++ drivers/net/wireless/cw1200/pm.h | 38 + drivers/net/wireless/cw1200/queue.c | 583 +++++++ drivers/net/wireless/cw1200/queue.h | 116 ++ drivers/net/wireless/cw1200/sbus.h | 37 + drivers/net/wireless/cw1200/scan.c | 461 +++++ drivers/net/wireless/cw1200/scan.h | 56 + drivers/net/wireless/cw1200/sta.c | 2406 +++++++++++++++++++++++++++ drivers/net/wireless/cw1200/sta.h | 123 ++ drivers/net/wireless/cw1200/txrx.c | 1474 ++++++++++++++++ drivers/net/wireless/cw1200/txrx.h | 106 ++ drivers/net/wireless/cw1200/wsm.c | 1884 +++++++++++++++++++++ drivers/net/wireless/cw1200/wsm.h | 1879 +++++++++++++++++++++ include/linux/cw1200_platform.h | 46 + 34 files changed, 15040 insertions(+) create mode 100644 drivers/net/wireless/cw1200/Kconfig create mode 100644 drivers/net/wireless/cw1200/Makefile create mode 100644 drivers/net/wireless/cw1200/bh.c create mode 100644 drivers/net/wireless/cw1200/bh.h create mode 100644 drivers/net/wireless/cw1200/cw1200.h create mode 100644 drivers/net/wireless/cw1200/cw1200_sagrad.c create mode 100644 drivers/net/wireless/cw1200/cw1200_sdio.c create mode 100644 drivers/net/wireless/cw1200/cw1200_spi.c create mode 100644 drivers/net/wireless/cw1200/debug.c create mode 100644 drivers/net/wireless/cw1200/debug.h create mode 100644 drivers/net/wireless/cw1200/fwio.c create mode 100644 drivers/net/wireless/cw1200/fwio.h create mode 100644 drivers/net/wireless/cw1200/hwio.c create mode 100644 drivers/net/wireless/cw1200/hwio.h create mode 100644 drivers/net/wireless/cw1200/itp.c create mode 100644 drivers/net/wireless/cw1200/itp.h create mode 100644 
drivers/net/wireless/cw1200/main.c create mode 100644 drivers/net/wireless/cw1200/pm.c create mode 100644 drivers/net/wireless/cw1200/pm.h create mode 100644 drivers/net/wireless/cw1200/queue.c create mode 100644 drivers/net/wireless/cw1200/queue.h create mode 100644 drivers/net/wireless/cw1200/sbus.h create mode 100644 drivers/net/wireless/cw1200/scan.c create mode 100644 drivers/net/wireless/cw1200/scan.h create mode 100644 drivers/net/wireless/cw1200/sta.c create mode 100644 drivers/net/wireless/cw1200/sta.h create mode 100644 drivers/net/wireless/cw1200/txrx.c create mode 100644 drivers/net/wireless/cw1200/txrx.h create mode 100644 drivers/net/wireless/cw1200/wsm.c create mode 100644 drivers/net/wireless/cw1200/wsm.h create mode 100644 include/linux/cw1200_platform.h (limited to 'include') diff --git a/MAINTAINERS b/MAINTAINERS index 3d7782b9f90d..e5069c223391 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2299,6 +2299,11 @@ M: Jaya Kumar S: Maintained F: sound/pci/cs5535audio/ +CW1200 WLAN driver +M: Solomon Peachy +S: Maintained +F: drivers/net/wireless/cw1200/ + CX18 VIDEO4LINUX DRIVER M: Andy Walls L: ivtv-devel@ivtvdriver.org (moderated for non-subscribers) diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index f8f0156dff4e..200020eb3005 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -280,5 +280,6 @@ source "drivers/net/wireless/rtlwifi/Kconfig" source "drivers/net/wireless/ti/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" source "drivers/net/wireless/mwifiex/Kconfig" +source "drivers/net/wireless/cw1200/Kconfig" endif # WLAN diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 67156efe14c4..0fab227025be 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -57,3 +57,5 @@ obj-$(CONFIG_MWIFIEX) += mwifiex/ obj-$(CONFIG_BRCMFMAC) += brcm80211/ obj-$(CONFIG_BRCMSMAC) += brcm80211/ + +obj-$(CONFIG_CW1200) += cw1200/ diff --git a/drivers/net/wireless/cw1200/Kconfig b/drivers/net/wireless/cw1200/Kconfig new file mode 100644 index 000000000000..13e36119ae9f --- /dev/null +++ b/drivers/net/wireless/cw1200/Kconfig @@ -0,0 +1,46 @@ +config CW1200 + tristate "CW1200 WLAN support" + depends on MAC80211 && CFG80211 + help + This is a driver for the ST-E CW1100 & CW1200 WLAN chipsets. + This option just enables the driver core, see below for + specific bus support. + +if CW1200 + +config CW1200_WLAN_SDIO + tristate "Support SDIO platforms" + depends on CW1200 && MMC + help + Enable support for the CW1200 connected via an SDIO bus. + +config CW1200_WLAN_SPI + tristate "Support SPI platforms" + depends on CW1200 && SPI + help + Enables support for the CW1200 connected via a SPI bus. + +config CW1200_WLAN_SAGRAD + tristate "Support Sagrad SG901-1091/1098 modules" + depends on CW1200_WLAN_SDIO + help + This provides the platform data glue to support the + Sagrad SG901-1091/1098 modules in their standard SDIO EVK. + It also includes example SPI platform data. + +menu "Driver debug features" + depends on CW1200 && DEBUG_FS + +config CW1200_ETF + bool "Enable CW1200 Engineering Test Framework hooks" + help + If you don't know what this is, just say N. + +config CW1200_ITP + bool "Enable ITP access" + help + If you don't know what this is, just say N. 
+ +endmenu + +endif diff --git a/drivers/net/wireless/cw1200/Makefile b/drivers/net/wireless/cw1200/Makefile new file mode 100644 index 000000000000..c19737276be2 --- /dev/null +++ b/drivers/net/wireless/cw1200/Makefile @@ -0,0 +1,24 @@ +cw1200_core-y := \ + fwio.o \ + txrx.o \ + main.o \ + queue.o \ + hwio.o \ + bh.o \ + wsm.o \ + sta.o \ + scan.o \ + pm.o \ + debug.o +cw1200_core-$(CONFIG_CW1200_ITP) += itp.o + +# CFLAGS_sta.o += -DDEBUG + +cw1200_wlan_sdio-y := cw1200_sdio.o +cw1200_wlan_spi-y := cw1200_spi.o +cw1200_wlan_sagrad-y := cw1200_sagrad.o + +obj-$(CONFIG_CW1200) += cw1200_core.o +obj-$(CONFIG_CW1200_WLAN_SDIO) += cw1200_wlan_sdio.o +obj-$(CONFIG_CW1200_WLAN_SPI) += cw1200_wlan_spi.o +obj-$(CONFIG_CW1200_WLAN_SAGRAD) += cw1200_wlan_sagrad.o diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c new file mode 100644 index 000000000000..cf7375f92fb3 --- /dev/null +++ b/drivers/net/wireless/cw1200/bh.c @@ -0,0 +1,616 @@ +/* + * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * Based on: + * ST-Ericsson UMAC CW1200 driver, which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include "cw1200.h" +#include "bh.h" +#include "hwio.h" +#include "wsm.h" +#include "sbus.h" +#include "debug.h" +#include "fwio.h" + +static int cw1200_bh(void *arg); + +#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4) +/* an SPI message cannot be bigger than (2"12-1)*2 bytes + * "*2" to cvt to bytes */ +#define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2) +#define PIGGYBACK_CTRL_REG (2) +#define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG) + +/* Suspend state privates */ +enum cw1200_bh_pm_state { + CW1200_BH_RESUMED = 0, + CW1200_BH_SUSPEND, + CW1200_BH_SUSPENDED, + CW1200_BH_RESUME, +}; + +typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv, + u8 *data, size_t size); + +static void cw1200_bh_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, bh_work); + cw1200_bh(priv); +} + +int cw1200_register_bh(struct cw1200_common *priv) +{ + int err = 0; + /* Realtime workqueue */ + priv->bh_workqueue = alloc_workqueue("cw1200_bh", + WQ_MEM_RECLAIM | WQ_HIGHPRI + | WQ_CPU_INTENSIVE, 1); + + if (!priv->bh_workqueue) + return -ENOMEM; + + INIT_WORK(&priv->bh_work, cw1200_bh_work); + + pr_debug("[BH] register.\n"); + + atomic_set(&priv->bh_rx, 0); + atomic_set(&priv->bh_tx, 0); + atomic_set(&priv->bh_term, 0); + atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); + priv->bh_error = 0; + priv->hw_bufs_used = 0; + priv->buf_id_tx = 0; + priv->buf_id_rx = 0; + init_waitqueue_head(&priv->bh_wq); + init_waitqueue_head(&priv->bh_evt_wq); + + err = !queue_work(priv->bh_workqueue, &priv->bh_work); + WARN_ON(err); + return err; +} + +void cw1200_unregister_bh(struct cw1200_common *priv) +{ + atomic_add(1, &priv->bh_term); + wake_up(&priv->bh_wq); + + flush_workqueue(priv->bh_workqueue); + + destroy_workqueue(priv->bh_workqueue); + priv->bh_workqueue = NULL; + + pr_debug("[BH] unregistered.\n"); +} + +void cw1200_irq_handler(struct cw1200_common *priv) +{ + pr_debug("[BH] irq.\n"); + + /* Disable Interrupts! 
*/ + /* NOTE: sbus_ops->lock already held */ + __cw1200_irq_enable(priv, 0); + + if (/* WARN_ON */(priv->bh_error)) + return; + + if (atomic_add_return(1, &priv->bh_rx) == 1) + wake_up(&priv->bh_wq); +} +EXPORT_SYMBOL_GPL(cw1200_irq_handler); + +void cw1200_bh_wakeup(struct cw1200_common *priv) +{ + pr_debug("[BH] wakeup.\n"); + if (priv->bh_error) { + pr_err("[BH] wakeup failed (BH error)\n"); + return; + } + + if (atomic_add_return(1, &priv->bh_tx) == 1) + wake_up(&priv->bh_wq); +} + +int cw1200_bh_suspend(struct cw1200_common *priv) +{ + pr_debug("[BH] suspend.\n"); + if (priv->bh_error) { + wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n"); + return -EINVAL; + } + + atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND); + wake_up(&priv->bh_wq); + return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || + (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)), + 1 * HZ) ? 0 : -ETIMEDOUT; +} + +int cw1200_bh_resume(struct cw1200_common *priv) +{ + pr_debug("[BH] resume.\n"); + if (priv->bh_error) { + wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n"); + return -EINVAL; + } + + atomic_set(&priv->bh_suspend, CW1200_BH_RESUME); + wake_up(&priv->bh_wq); + return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || + (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)), + 1 * HZ) ? 0 : -ETIMEDOUT; +} + +static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv) +{ + ++priv->hw_bufs_used; +} + +int wsm_release_tx_buffer(struct cw1200_common *priv, int count) +{ + int ret = 0; + int hw_bufs_used = priv->hw_bufs_used; + + priv->hw_bufs_used -= count; + if (WARN_ON(priv->hw_bufs_used < 0)) + ret = -1; + else if (hw_bufs_used >= priv->wsm_caps.input_buffers) + ret = 1; + if (!priv->hw_bufs_used) + wake_up(&priv->bh_evt_wq); + return ret; +} + +static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv, + u16 *ctrl_reg) +{ + int ret; + + ret = cw1200_reg_read_16(priv, + ST90TDS_CONTROL_REG_ID, ctrl_reg); + if (ret) { + ret = cw1200_reg_read_16(priv, + ST90TDS_CONTROL_REG_ID, ctrl_reg); + if (ret) + pr_err("[BH] Failed to read control register.\n"); + } + + return ret; +} + +static int cw1200_device_wakeup(struct cw1200_common *priv) +{ + u16 ctrl_reg; + int ret; + + pr_debug("[BH] Device wakeup.\n"); + + /* First, set the dpll register */ + ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, + cw1200_dpll_from_clk(priv->hw_refclk)); + if (WARN_ON(ret)) + return ret; + + /* To force the device to be always-on, the host sets WLAN_UP to 1 */ + ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, + ST90TDS_CONT_WUP_BIT); + if (WARN_ON(ret)) + return ret; + + ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg); + if (WARN_ON(ret)) + return ret; + + /* If the device returns WLAN_RDY as 1, the device is active and will + * remain active. */ + if (ctrl_reg & ST90TDS_CONT_RDY_BIT) { + pr_debug("[BH] Device awake.\n"); + return 1; + } + + return 0; +} + +/* Must be called from BH thraed. */ +void cw1200_enable_powersave(struct cw1200_common *priv, + bool enable) +{ + pr_debug("[BH] Powerave is %s.\n", + enable ? 
"enabled" : "disabled"); + priv->powersave_enabled = enable; +} + +static int cw1200_bh_rx_helper(struct cw1200_common *priv, + uint16_t *ctrl_reg, + int *tx) +{ + size_t read_len = 0; + struct sk_buff *skb_rx = NULL; + struct wsm_hdr *wsm; + size_t wsm_len; + u16 wsm_id; + u8 wsm_seq; + int rx_resync = 1; + + size_t alloc_len; + u8 *data; + + read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2; + if (!read_len) + return 0; /* No more work */ + + if (WARN_ON((read_len < sizeof(struct wsm_hdr)) || + (read_len > EFFECTIVE_BUF_SIZE))) { + pr_debug("Invalid read len: %zu (%04x)", + read_len, *ctrl_reg); + goto err; + } + + /* Add SIZE of PIGGYBACK reg (CONTROL Reg) + * to the NEXT Message length + 2 Bytes for SKB */ + read_len = read_len + 2; + + alloc_len = priv->sbus_ops->align_size( + priv->sbus_priv, read_len); + + /* Check if not exceeding CW1200 capabilities */ + if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) { + pr_debug("Read aligned len: %zu\n", + alloc_len); + } + + skb_rx = dev_alloc_skb(alloc_len); + if (WARN_ON(!skb_rx)) + goto err; + + skb_trim(skb_rx, 0); + skb_put(skb_rx, read_len); + data = skb_rx->data; + if (WARN_ON(!data)) + goto err; + + if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) { + pr_err("rx blew up, len %zu\n", alloc_len); + goto err; + } + + /* Piggyback */ + *ctrl_reg = __le16_to_cpu( + ((__le16 *)data)[alloc_len / 2 - 1]); + + wsm = (struct wsm_hdr *)data; + wsm_len = __le16_to_cpu(wsm->len); + if (WARN_ON(wsm_len > read_len)) + goto err; + + if (priv->wsm_enable_wsm_dumps) + print_hex_dump_bytes("<-- ", + DUMP_PREFIX_NONE, + data, wsm_len); + + wsm_id = __le16_to_cpu(wsm->id) & 0xFFF; + wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7; + + skb_trim(skb_rx, wsm_len); + + if (wsm_id == 0x0800) { + wsm_handle_exception(priv, + &data[sizeof(*wsm)], + wsm_len - sizeof(*wsm)); + goto err; + } else if (!rx_resync) { + if (WARN_ON(wsm_seq != priv->wsm_rx_seq)) + goto err; + } + priv->wsm_rx_seq = (wsm_seq + 1) & 7; + rx_resync = 0; + + if (wsm_id & 0x0400) { + int rc = wsm_release_tx_buffer(priv, 1); + if (WARN_ON(rc < 0)) + return rc; + else if (rc > 0) + *tx = 1; + } + + /* cw1200_wsm_rx takes care on SKB livetime */ + if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx))) + goto err; + + if (skb_rx) { + dev_kfree_skb(skb_rx); + skb_rx = NULL; + } + + return 0; + +err: + if (skb_rx) { + dev_kfree_skb(skb_rx); + skb_rx = NULL; + } + return -1; +} + +static int cw1200_bh_tx_helper(struct cw1200_common *priv, + int *pending_tx, + int *tx_burst) +{ + size_t tx_len; + u8 *data; + int ret; + struct wsm_hdr *wsm; + + if (priv->device_can_sleep) { + ret = cw1200_device_wakeup(priv); + if (WARN_ON(ret < 0)) { /* Error in wakeup */ + *pending_tx = 1; + return 0; + } else if (ret) { /* Woke up */ + priv->device_can_sleep = false; + } else { /* Did not awake */ + *pending_tx = 1; + return 0; + } + } + + wsm_alloc_tx_buffer(priv); + ret = wsm_get_tx(priv, &data, &tx_len, tx_burst); + if (ret <= 0) { + wsm_release_tx_buffer(priv, 1); + if (WARN_ON(ret < 0)) + return ret; /* Error */ + return 0; /* No work */ + } + + wsm = (struct wsm_hdr *)data; + BUG_ON(tx_len < sizeof(*wsm)); + BUG_ON(__le16_to_cpu(wsm->len) != tx_len); + + atomic_add(1, &priv->bh_tx); + + tx_len = priv->sbus_ops->align_size( + priv->sbus_priv, tx_len); + + /* Check if not exceeding CW1200 capabilities */ + if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE)) + pr_debug("Write aligned len: %zu\n", tx_len); + + wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX)); + wsm->id |= 
__cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq)); + + if (WARN_ON(cw1200_data_write(priv, data, tx_len))) { + pr_err("tx blew up, len %zu\n", tx_len); + wsm_release_tx_buffer(priv, 1); + return -1; /* Error */ + } + + if (priv->wsm_enable_wsm_dumps) + print_hex_dump_bytes("--> ", + DUMP_PREFIX_NONE, + data, + __le16_to_cpu(wsm->len)); + + wsm_txed(priv, data); + priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX; + + if (*tx_burst > 1) { + cw1200_debug_tx_burst(priv); + return 1; /* Work remains */ + } + + return 0; +} + +static int cw1200_bh(void *arg) +{ + struct cw1200_common *priv = arg; + int rx, tx, term, suspend; + u16 ctrl_reg = 0; + int tx_allowed; + int pending_tx = 0; + int tx_burst; + long status; + u32 dummy; + int ret; + + for (;;) { + if (!priv->hw_bufs_used && + priv->powersave_enabled && + !priv->device_can_sleep && + !atomic_read(&priv->recent_scan)) { + status = 1 * HZ; + pr_debug("[BH] Device wakedown. No data.\n"); + cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0); + priv->device_can_sleep = true; + } else if (priv->hw_bufs_used) { + /* Interrupt loss detection */ + status = 1 * HZ; + } else { + status = MAX_SCHEDULE_TIMEOUT; + } + + /* Dummy Read for SDIO retry mechanism*/ + if ((priv->hw_type != -1) && + (atomic_read(&priv->bh_rx) == 0) && + (atomic_read(&priv->bh_tx) == 0)) + cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID, + &dummy, sizeof(dummy)); + + pr_debug("[BH] waiting ...\n"); + status = wait_event_interruptible_timeout(priv->bh_wq, ({ + rx = atomic_xchg(&priv->bh_rx, 0); + tx = atomic_xchg(&priv->bh_tx, 0); + term = atomic_xchg(&priv->bh_term, 0); + suspend = pending_tx ? + 0 : atomic_read(&priv->bh_suspend); + (rx || tx || term || suspend || priv->bh_error); + }), status); + + pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n", + rx, tx, term, suspend, status); + + /* Did an error occur? */ + if ((status < 0 && status != -ERESTARTSYS) || + term || priv->bh_error) { + break; + } + if (!status) { /* wait_event timed out */ + unsigned long timestamp = jiffies; + long timeout; + int pending = 0; + int i; + + /* Check to see if we have any outstanding frames */ + if (priv->hw_bufs_used && (!rx || !tx)) { + wiphy_warn(priv->hw->wiphy, + "Missed interrupt? (%d frames outstanding)\n", + priv->hw_bufs_used); + rx = 1; + + /* Get a timestamp of "oldest" frame */ + for (i = 0; i < 4; ++i) + pending += cw1200_queue_get_xmit_timestamp( + &priv->tx_queue[i], + ×tamp, + priv->pending_frame_id); + + /* Check if frame transmission is timed out. + * Add an extra second with respect to possible + * interrupt loss. + */ + timeout = timestamp + + WSM_CMD_LAST_CHANCE_TIMEOUT + + 1 * HZ - + jiffies; + + /* And terminate BH thread if the frame is "stuck" */ + if (pending && timeout < 0) { + wiphy_warn(priv->hw->wiphy, + "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n", + priv->hw_bufs_used, pending, + timestamp, jiffies); + break; + } + } else if (!priv->device_can_sleep && + !atomic_read(&priv->recent_scan)) { + pr_debug("[BH] Device wakedown. Timeout.\n"); + cw1200_reg_write_16(priv, + ST90TDS_CONTROL_REG_ID, 0); + priv->device_can_sleep = true; + } + goto done; + } else if (suspend) { + pr_debug("[BH] Device suspend.\n"); + if (priv->powersave_enabled) { + pr_debug("[BH] Device wakedown. 
Suspend.\n"); + cw1200_reg_write_16(priv, + ST90TDS_CONTROL_REG_ID, 0); + priv->device_can_sleep = true; + } + + atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED); + wake_up(&priv->bh_evt_wq); + status = wait_event_interruptible(priv->bh_wq, + CW1200_BH_RESUME == atomic_read(&priv->bh_suspend)); + if (status < 0) { + wiphy_err(priv->hw->wiphy, + "Failed to wait for resume: %ld.\n", + status); + break; + } + pr_debug("[BH] Device resume.\n"); + atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); + wake_up(&priv->bh_evt_wq); + atomic_add(1, &priv->bh_rx); + goto done; + } + + rx: + tx += pending_tx; + pending_tx = 0; + + if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) + break; + + /* Don't bother trying to rx unless we have data to read */ + if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { + ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); + if (ret < 0) + break; + /* Double up here if there's more data.. */ + if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { + ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); + if (ret < 0) + break; + } + } + + tx: + if (tx) { + tx = 0; + + BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers); + tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used; + tx_allowed = tx_burst > 0; + + if (!tx_allowed) { + /* Buffers full. Ensure we process tx + * after we handle rx.. + */ + pending_tx = tx; + goto done_rx; + } + ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst); + if (ret < 0) + break; + if (ret > 0) /* More to transmit */ + tx = ret; + + /* Re-read ctrl reg */ + if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) + break; + } + + done_rx: + if (priv->bh_error) + break; + if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) + goto rx; + if (tx) + goto tx; + + done: + /* Re-enable device interrupts */ + priv->sbus_ops->lock(priv->sbus_priv); + __cw1200_irq_enable(priv, 1); + priv->sbus_ops->unlock(priv->sbus_priv); + } + + /* Explicitly disable device interrupts */ + priv->sbus_ops->lock(priv->sbus_priv); + __cw1200_irq_enable(priv, 0); + priv->sbus_ops->unlock(priv->sbus_priv); + + if (!term) { + pr_err("[BH] Fatal error, exiting.\n"); + priv->bh_error = 1; + /* TODO: schedule_work(recovery) */ + } + return 0; +} diff --git a/drivers/net/wireless/cw1200/bh.h b/drivers/net/wireless/cw1200/bh.h new file mode 100644 index 000000000000..af6a4853728f --- /dev/null +++ b/drivers/net/wireless/cw1200/bh.h @@ -0,0 +1,28 @@ +/* + * Device handling thread interface for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_BH_H +#define CW1200_BH_H + +/* extern */ struct cw1200_common; + +int cw1200_register_bh(struct cw1200_common *priv); +void cw1200_unregister_bh(struct cw1200_common *priv); +void cw1200_irq_handler(struct cw1200_common *priv); +void cw1200_bh_wakeup(struct cw1200_common *priv); +int cw1200_bh_suspend(struct cw1200_common *priv); +int cw1200_bh_resume(struct cw1200_common *priv); +/* Must be called from BH thread. 
*/ +void cw1200_enable_powersave(struct cw1200_common *priv, + bool enable); +int wsm_release_tx_buffer(struct cw1200_common *priv, int count); + +#endif /* CW1200_BH_H */ diff --git a/drivers/net/wireless/cw1200/cw1200.h b/drivers/net/wireless/cw1200/cw1200.h new file mode 100644 index 000000000000..2aa17ca60ba8 --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200.h @@ -0,0 +1,332 @@ +/* + * Common private data for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * Based on the mac80211 Prism54 code, which is + * Copyright (c) 2006, Michael Wu + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_H +#define CW1200_H + +#include +#include +#include +#include +#include + +#include "queue.h" +#include "wsm.h" +#include "scan.h" +#include "txrx.h" +#include "pm.h" + +/* Forward declarations */ +struct sbus_ops; +struct task_struct; +struct cw1200_debug_priv; +struct firmware; + +#ifdef CONFIG_CW1200_ETF +extern int etf_mode; +extern char *etf_firmware; +#endif + +#define CW1200_MAX_CTRL_FRAME_LEN (0x1000) + +#define CW1200_MAX_STA_IN_AP_MODE (5) +#define CW1200_LINK_ID_AFTER_DTIM (CW1200_MAX_STA_IN_AP_MODE + 1) +#define CW1200_LINK_ID_UAPSD (CW1200_MAX_STA_IN_AP_MODE + 2) +#define CW1200_LINK_ID_MAX (CW1200_MAX_STA_IN_AP_MODE + 3) +#define CW1200_MAX_REQUEUE_ATTEMPTS (5) + +#define CW1200_MAX_TID (8) + +#define CW1200_BLOCK_ACK_CNT (30) +#define CW1200_BLOCK_ACK_THLD (800) +#define CW1200_BLOCK_ACK_HIST (3) +#define CW1200_BLOCK_ACK_INTERVAL (1 * HZ / CW1200_BLOCK_ACK_HIST) + +#define CW1200_JOIN_TIMEOUT (1 * HZ) +#define CW1200_AUTH_TIMEOUT (5 * HZ) + +struct cw1200_ht_info { + struct ieee80211_sta_ht_cap ht_cap; + enum nl80211_channel_type channel_type; + u16 operation_mode; +}; + +/* Please keep order */ +enum cw1200_join_status { + CW1200_JOIN_STATUS_PASSIVE = 0, + CW1200_JOIN_STATUS_MONITOR, + CW1200_JOIN_STATUS_JOINING, + CW1200_JOIN_STATUS_PRE_STA, + CW1200_JOIN_STATUS_STA, + CW1200_JOIN_STATUS_IBSS, + CW1200_JOIN_STATUS_AP, +}; + +enum cw1200_link_status { + CW1200_LINK_OFF, + CW1200_LINK_RESERVE, + CW1200_LINK_SOFT, + CW1200_LINK_HARD, + CW1200_LINK_RESET, + CW1200_LINK_RESET_REMAP, +}; + +extern int cw1200_power_mode; +extern const char * const cw1200_fw_types[]; + +struct cw1200_link_entry { + unsigned long timestamp; + enum cw1200_link_status status; + enum cw1200_link_status prev_status; + u8 mac[ETH_ALEN]; + u8 buffered[CW1200_MAX_TID]; + struct sk_buff_head rx_queue; +}; + +struct cw1200_common { + /* interfaces to the rest of the stack */ + struct ieee80211_hw *hw; + struct ieee80211_vif *vif; + struct device *pdev; + + /* Statistics */ + struct ieee80211_low_level_stats stats; + + /* Our macaddr */ + u8 mac_addr[ETH_ALEN]; + + /* Hardware interface */ + const struct sbus_ops *sbus_ops; + struct sbus_priv *sbus_priv; + + /* Hardware information */ + enum { + HIF_9000_SILICON_VERSATILE = 0, + HIF_8601_VERSATILE, + HIF_8601_SILICON, + } hw_type; + enum { + CW1200_HW_REV_CUT10 = 10, + CW1200_HW_REV_CUT11 = 11, + CW1200_HW_REV_CUT20 = 20, + CW1200_HW_REV_CUT22 = 22, + CW1X60_HW_REV = 40, + } hw_revision; + int hw_refclk; + bool hw_have_5ghz; + const struct firmware *sdd; + char *sdd_path; + + struct cw1200_debug_priv *debug; + + struct workqueue_struct 
*workqueue; + struct mutex conf_mutex; + + struct cw1200_queue tx_queue[4]; + struct cw1200_queue_stats tx_queue_stats; + int tx_burst_idx; + + /* firmware/hardware info */ + unsigned int tx_hdr_len; + + /* Radio data */ + int output_power; + + /* BBP/MAC state */ + struct ieee80211_rate *rates; + struct ieee80211_rate *mcs_rates; + struct ieee80211_channel *channel; + struct wsm_edca_params edca; + struct wsm_tx_queue_params tx_queue_params; + struct wsm_mib_association_mode association_mode; + struct wsm_set_bss_params bss_params; + struct cw1200_ht_info ht_info; + struct wsm_set_pm powersave_mode; + struct wsm_set_pm firmware_ps_mode; + int cqm_rssi_thold; + unsigned cqm_rssi_hyst; + bool cqm_use_rssi; + int cqm_beacon_loss_count; + int channel_switch_in_progress; + wait_queue_head_t channel_switch_done; + u8 long_frame_max_tx_count; + u8 short_frame_max_tx_count; + int mode; + bool enable_beacon; + int beacon_int; + bool listening; + struct wsm_rx_filter rx_filter; + struct wsm_mib_multicast_filter multicast_filter; + bool has_multicast_subscription; + bool disable_beacon_filter; + struct work_struct update_filtering_work; + struct work_struct set_beacon_wakeup_period_work; + + u8 ba_rx_tid_mask; + u8 ba_tx_tid_mask; + + struct cw1200_pm_state pm_state; + + struct wsm_p2p_ps_modeinfo p2p_ps_modeinfo; + struct wsm_uapsd_info uapsd_info; + bool setbssparams_done; + bool bt_present; + u8 conf_listen_interval; + u32 listen_interval; + u32 erp_info; + u32 rts_threshold; + + /* BH */ + atomic_t bh_rx; + atomic_t bh_tx; + atomic_t bh_term; + atomic_t bh_suspend; + + struct workqueue_struct *bh_workqueue; + struct work_struct bh_work; + + int bh_error; + wait_queue_head_t bh_wq; + wait_queue_head_t bh_evt_wq; + u8 buf_id_tx; + u8 buf_id_rx; + u8 wsm_rx_seq; + u8 wsm_tx_seq; + int hw_bufs_used; + bool powersave_enabled; + bool device_can_sleep; + + /* Scan status */ + struct cw1200_scan scan; + /* Keep cw1200 awake (WUP = 1) 1 second after each scan to avoid + * FW issue with sleeping/waking up. 
*/ + atomic_t recent_scan; + struct delayed_work clear_recent_scan_work; + + /* WSM */ + struct wsm_startup_ind wsm_caps; + struct mutex wsm_cmd_mux; + struct wsm_buf wsm_cmd_buf; + struct wsm_cmd wsm_cmd; + wait_queue_head_t wsm_cmd_wq; + wait_queue_head_t wsm_startup_done; + int firmware_ready; + atomic_t tx_lock; + + /* WSM debug */ + int wsm_enable_wsm_dumps; + + /* WSM Join */ + enum cw1200_join_status join_status; + u32 pending_frame_id; + bool join_pending; + struct delayed_work join_timeout; + struct work_struct unjoin_work; + struct work_struct join_complete_work; + int join_complete_status; + int join_dtim_period; + bool delayed_unjoin; + + /* TX/RX and security */ + s8 wep_default_key_id; + struct work_struct wep_key_work; + u32 key_map; + struct wsm_add_key keys[WSM_KEY_MAX_INDEX + 1]; + + /* AP powersave */ + u32 link_id_map; + struct cw1200_link_entry link_id_db[CW1200_MAX_STA_IN_AP_MODE]; + struct work_struct link_id_work; + struct delayed_work link_id_gc_work; + u32 sta_asleep_mask; + u32 pspoll_mask; + bool aid0_bit_set; + spinlock_t ps_state_lock; /* Protect power save state */ + bool buffered_multicasts; + bool tx_multicast; + struct work_struct set_tim_work; + struct work_struct set_cts_work; + struct work_struct multicast_start_work; + struct work_struct multicast_stop_work; + struct timer_list mcast_timeout; + + /* WSM events and CQM implementation */ + spinlock_t event_queue_lock; /* Protect event queue */ + struct list_head event_queue; + struct work_struct event_handler; + + struct delayed_work bss_loss_work; + spinlock_t bss_loss_lock; /* Protect BSS loss state */ + int bss_loss_state; + int bss_loss_confirm_id; + int delayed_link_loss; + struct work_struct bss_params_work; + + /* TX rate policy cache */ + struct tx_policy_cache tx_policy_cache; + struct work_struct tx_policy_upload_work; + + /* legacy PS mode switch in suspend */ + int ps_mode_switch_in_progress; + wait_queue_head_t ps_mode_switch_done; + + /* Workaround for WFD testcase 6.1.10*/ + struct work_struct linkid_reset_work; + u8 action_frame_sa[ETH_ALEN]; + u8 action_linkid; + +#ifdef CONFIG_CW1200_ETF + struct sk_buff_head etf_q; +#endif +}; + +struct cw1200_sta_priv { + int link_id; +}; + +/* interfaces for the drivers */ +int cw1200_core_probe(const struct sbus_ops *sbus_ops, + struct sbus_priv *sbus, + struct device *pdev, + struct cw1200_common **pself, + int ref_clk, const u8 *macaddr, + const char *sdd_path, bool have_5ghz); +void cw1200_core_release(struct cw1200_common *self); + +#define FWLOAD_BLOCK_SIZE (1024) + +static inline int cw1200_is_ht(const struct cw1200_ht_info *ht_info) +{ + return ht_info->channel_type != NL80211_CHAN_NO_HT; +} + +static inline int cw1200_ht_greenfield(const struct cw1200_ht_info *ht_info) +{ + return cw1200_is_ht(ht_info) && + (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && + !(ht_info->operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); +} + +static inline int cw1200_ht_ampdu_density(const struct cw1200_ht_info *ht_info) +{ + if (!cw1200_is_ht(ht_info)) + return 0; + return ht_info->ht_cap.ampdu_density; +} + +#endif /* CW1200_H */ diff --git a/drivers/net/wireless/cw1200/cw1200_sagrad.c b/drivers/net/wireless/cw1200/cw1200_sagrad.c new file mode 100644 index 000000000000..a5ada0eda1dd --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200_sagrad.c @@ -0,0 +1,145 @@ +/* + * Platform glue data for ST-Ericsson CW1200 driver + * + * Copyright (c) 2013, Sagrad, Inc + * Author: Solomon Peachy + * + * This program is free software; you can redistribute it 
and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +MODULE_AUTHOR("Solomon Peachy "); +MODULE_DESCRIPTION("ST-Ericsson CW1200 Platform glue driver"); +MODULE_LICENSE("GPL"); + +/* Define just one of these. Feel free to customize as needed */ +#define SAGRAD_1091_1098_EVK_SDIO +/* #define SAGRAD_1091_1098_EVK_SPI */ + +#ifdef SAGRAD_1091_1098_EVK_SDIO +#if 0 +static struct resource cw1200_href_resources[] = { + { + .start = 215, /* fix me as appropriate */ + .end = 215, /* ditto */ + .flags = IORESOURCE_IO, + .name = "cw1200_wlan_reset", + }, + { + .start = 216, /* fix me as appropriate */ + .end = 216, /* ditto */ + .flags = IORESOURCE_IO, + .name = "cw1200_wlan_powerup", + }, + { + .start = NOMADIK_GPIO_TO_IRQ(216), /* fix me as appropriate */ + .end = NOMADIK_GPIO_TO_IRQ(216), /* ditto */ + .flags = IORESOURCE_IRQ, + .name = "cw1200_wlan_irq", + }, +}; +#endif + +static int cw1200_power_ctrl(const struct cw1200_platform_data_sdio *pdata, + bool enable) +{ + /* Control 3v3 and 1v8 to hardware as appropriate */ + /* Note this is not needed if it's controlled elsewhere or always on */ + + /* May require delay for power to stabilize */ + return 0; +} + +static int cw1200_clk_ctrl(const struct cw1200_platform_data_sdio *pdata, + bool enable) +{ + /* Turn CLK_32K off and on as appropriate. */ + /* Note this is not needed if it's always on */ + + /* May require delay for clock to stabilize */ + return 0; +} + +static struct cw1200_platform_data_sdio cw1200_platform_data = { + .ref_clk = 38400, + .have_5ghz = false, +#if 0 + .reset = &cw1200_href_resources[0], + .powerup = &cw1200_href_resources[1], + .irq = &cw1200_href_resources[2], +#endif + .power_ctrl = cw1200_power_ctrl, + .clk_ctrl = cw1200_clk_ctrl, +/* .macaddr = ??? */ + .sdd_file = "sdd_sagrad_1091_1098.bin", +}; +#endif + +#ifdef SAGRAD_1091_1098_EVK_SPI +/* Note that this is an example of integrating into your board support file */ +static struct resource cw1200_href_resources[] = { + { + .start = GPIO_RF_RESET, + .end = GPIO_RF_RESET, + .flags = IORESOURCE_IO, + .name = "cw1200_wlan_reset", + }, + { + .start = GPIO_RF_POWERUP, + .end = GPIO_RF_POWERUP, + .flags = IORESOURCE_IO, + .name = "cw1200_wlan_powerup", + }, +}; + +static int cw1200_power_ctrl(const struct cw1200_platform_data_spi *pdata, + bool enable) +{ + /* Control 3v3 and 1v8 to hardware as appropriate */ + /* Note this is not needed if it's controlled elsewhere or always on */ + + /* May require delay for power to stabilize */ + return 0; +} +static int cw1200_clk_ctrl(const struct cw1200_platform_data_spi *pdata, + bool enable) +{ + /* Turn CLK_32K off and on as appropriate. */ + /* Note this is not needed if it's always on */ + + /* May require delay for clock to stabilize */ + return 0; +} + +static struct cw1200_platform_data_spi cw1200_platform_data = { + .ref_clk = 38400, + .spi_bits_per_word = 16, + .reset = &cw1200_href_resources[0], + .powerup = &cw1200_href_resources[1], + .power_ctrl = cw1200_power_ctrl, + .clk_ctrl = cw1200_clk_ctrl, +/* .macaddr = ??? 
*/ + .sdd_file = "sdd_sagrad_1091_1098.bin", +}; +static struct spi_board_info myboard_spi_devices[] __initdata = { + { + .modalias = "cw1200_wlan_spi", + .max_speed_hz = 10000000, /* 52MHz Max */ + .bus_num = 0, + .irq = WIFI_IRQ, + .platform_data = &cw1200_platform_data, + .chip_select = 0, + }, +}; +#endif + + +const void *cw1200_get_platform_data(void) +{ + return &cw1200_platform_data; +} +EXPORT_SYMBOL_GPL(cw1200_get_platform_data); diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c new file mode 100644 index 000000000000..f6e22192d80a --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200_sdio.c @@ -0,0 +1,409 @@ +/* + * Mac80211 SDIO driver for ST-Ericsson CW1200 device + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cw1200.h" +#include "sbus.h" +#include +#include "hwio.h" + +MODULE_AUTHOR("Dmitry Tarnyagin "); +MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SDIO driver"); +MODULE_LICENSE("GPL"); + +#define SDIO_BLOCK_SIZE (512) + +struct sbus_priv { + struct sdio_func *func; + struct cw1200_common *core; + const struct cw1200_platform_data_sdio *pdata; +}; + +#ifndef SDIO_VENDOR_ID_STE +#define SDIO_VENDOR_ID_STE 0x0020 +#endif + +#ifndef SDIO_DEVICE_ID_STE_CW1200 +#define SDIO_DEVICE_ID_STE_CW1200 0x2280 +#endif + +static const struct sdio_device_id cw1200_sdio_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) }, + { /* end: all zeroes */ }, +}; + +/* sbus_ops implemetation */ + +static int cw1200_sdio_memcpy_fromio(struct sbus_priv *self, + unsigned int addr, + void *dst, int count) +{ + return sdio_memcpy_fromio(self->func, dst, addr, count); +} + +static int cw1200_sdio_memcpy_toio(struct sbus_priv *self, + unsigned int addr, + const void *src, int count) +{ + return sdio_memcpy_toio(self->func, addr, (void *)src, count); +} + +static void cw1200_sdio_lock(struct sbus_priv *self) +{ + sdio_claim_host(self->func); +} + +static void cw1200_sdio_unlock(struct sbus_priv *self) +{ + sdio_release_host(self->func); +} + +static void cw1200_sdio_irq_handler(struct sdio_func *func) +{ + struct sbus_priv *self = sdio_get_drvdata(func); + + /* note: sdio_host already claimed here. */ + if (self->core) + cw1200_irq_handler(self->core); +} + +static irqreturn_t cw1200_gpio_hardirq(int irq, void *dev_id) +{ + return IRQ_WAKE_THREAD; +} + +static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id) +{ + struct sbus_priv *self = dev_id; + + if (self->core) { + sdio_claim_host(self->func); + cw1200_irq_handler(self->core); + sdio_release_host(self->func); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } +} + +static int cw1200_request_irq(struct sbus_priv *self) +{ + int ret; + const struct resource *irq = self->pdata->irq; + u8 cccr; + + cccr = sdio_f0_readb(self->func, SDIO_CCCR_IENx, &ret); + if (WARN_ON(ret)) + goto err; + + /* Master interrupt enable ... */ + cccr |= BIT(0); + + /* ... 
for our function */ + cccr |= BIT(self->func->num); + + sdio_f0_writeb(self->func, cccr, SDIO_CCCR_IENx, &ret); + if (WARN_ON(ret)) + goto err; + + ret = enable_irq_wake(irq->start); + if (WARN_ON(ret)) + goto err; + + /* Request the IRQ */ + ret = request_threaded_irq(irq->start, cw1200_gpio_hardirq, + cw1200_gpio_irq, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + irq->name, self); + if (WARN_ON(ret)) + goto err; + + return 0; + +err: + return ret; +} + +static int cw1200_sdio_irq_subscribe(struct sbus_priv *self) +{ + int ret = 0; + + pr_debug("SW IRQ subscribe\n"); + sdio_claim_host(self->func); + if (self->pdata->irq) + ret = cw1200_request_irq(self); + else + ret = sdio_claim_irq(self->func, cw1200_sdio_irq_handler); + + sdio_release_host(self->func); + return ret; +} + +static int cw1200_sdio_irq_unsubscribe(struct sbus_priv *self) +{ + int ret = 0; + + pr_debug("SW IRQ unsubscribe\n"); + + if (self->pdata->irq) { + disable_irq_wake(self->pdata->irq->start); + free_irq(self->pdata->irq->start, self); + } else { + sdio_claim_host(self->func); + ret = sdio_release_irq(self->func); + sdio_release_host(self->func); + } + return ret; +} + +static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata) +{ + const struct resource *reset = pdata->reset; + + if (reset) { + gpio_set_value(reset->start, 0); + msleep(30); /* Min is 2 * CLK32K cycles */ + gpio_free(reset->start); + } + + if (pdata->power_ctrl) + pdata->power_ctrl(pdata, false); + if (pdata->clk_ctrl) + pdata->clk_ctrl(pdata, false); + + return 0; +} + +static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata) +{ + const struct resource *reset = pdata->reset; + const struct resource *powerup = pdata->reset; + + /* Ensure I/Os are pulled low */ + if (reset) { + gpio_request(reset->start, reset->name); + gpio_direction_output(reset->start, 0); + } + if (powerup) { + gpio_request(powerup->start, powerup->name); + gpio_direction_output(powerup->start, 0); + } + if (reset || powerup) + msleep(50); /* Settle time */ + + /* Enable 3v3 and 1v8 to hardware */ + if (pdata->power_ctrl) { + if (pdata->power_ctrl(pdata, true)) { + pr_err("power_ctrl() failed!\n"); + return -1; + } + } + + /* Enable CLK32K */ + if (pdata->clk_ctrl) { + if (pdata->clk_ctrl(pdata, true)) { + pr_err("clk_ctrl() failed!\n"); + return -1; + } + msleep(10); /* Delay until clock is stable for 2 cycles */ + } + + /* Enable POWERUP signal */ + if (powerup) { + gpio_set_value(powerup->start, 1); + msleep(250); /* or more..? */ + } + /* Enable RSTn signal */ + if (reset) { + gpio_set_value(reset->start, 1); + msleep(50); /* Or more..? 
*/ + } + return 0; +} + +static size_t cw1200_sdio_align_size(struct sbus_priv *self, size_t size) +{ + if (self->pdata->no_nptb) + size = round_up(size, SDIO_BLOCK_SIZE); + else + size = sdio_align_size(self->func, size); + + return size; +} + +static int cw1200_sdio_pm(struct sbus_priv *self, bool suspend) +{ + int ret = 0; + + if (self->pdata->irq) + ret = irq_set_irq_wake(self->pdata->irq->start, suspend); + return ret; +} + +static struct sbus_ops cw1200_sdio_sbus_ops = { + .sbus_memcpy_fromio = cw1200_sdio_memcpy_fromio, + .sbus_memcpy_toio = cw1200_sdio_memcpy_toio, + .lock = cw1200_sdio_lock, + .unlock = cw1200_sdio_unlock, + .align_size = cw1200_sdio_align_size, + .power_mgmt = cw1200_sdio_pm, +}; + +/* Probe Function to be called by SDIO stack when device is discovered */ +static int cw1200_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct sbus_priv *self; + int status; + + pr_info("cw1200_wlan_sdio: Probe called\n"); + + /* We are only able to handle the wlan function */ + if (func->num != 0x01) + return -ENODEV; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + pr_err("Can't allocate SDIO sbus_priv.\n"); + return -ENOMEM; + } + + func->card->quirks |= MMC_QUIRK_LENIENT_FN0; + + self->pdata = cw1200_get_platform_data(); + self->func = func; + sdio_set_drvdata(func, self); + sdio_claim_host(func); + sdio_enable_func(func); + sdio_release_host(func); + + status = cw1200_sdio_irq_subscribe(self); + + status = cw1200_core_probe(&cw1200_sdio_sbus_ops, + self, &func->dev, &self->core, + self->pdata->ref_clk, + self->pdata->macaddr, + self->pdata->sdd_file, + self->pdata->have_5ghz); + if (status) { + cw1200_sdio_irq_unsubscribe(self); + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + sdio_set_drvdata(func, NULL); + kfree(self); + } + + return status; +} + +/* Disconnect Function to be called by SDIO stack when + * device is disconnected */ +static void cw1200_sdio_disconnect(struct sdio_func *func) +{ + struct sbus_priv *self = sdio_get_drvdata(func); + + if (self) { + cw1200_sdio_irq_unsubscribe(self); + if (self->core) { + cw1200_core_release(self->core); + self->core = NULL; + } + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + sdio_set_drvdata(func, NULL); + kfree(self); + } +} + +static int cw1200_sdio_suspend(struct device *dev) +{ + int ret; + struct sdio_func *func = dev_to_sdio_func(dev); + struct sbus_priv *self = sdio_get_drvdata(func); + + if (!cw1200_can_suspend(self->core)) + return -EAGAIN; + + /* Notify SDIO that CW1200 will remain powered during suspend */ + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (ret) + pr_err("Error setting SDIO pm flags: %i\n", ret); + + return ret; +} + +static int cw1200_sdio_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops cw1200_pm_ops = { + .suspend = cw1200_sdio_suspend, + .resume = cw1200_sdio_resume, +}; + +static struct sdio_driver sdio_driver = { + .name = "cw1200_wlan_sdio", + .id_table = cw1200_sdio_ids, + .probe = cw1200_sdio_probe, + .remove = cw1200_sdio_disconnect, + .drv = { + .pm = &cw1200_pm_ops, + } +}; + +/* Init Module function -> Called by insmod */ +static int __init cw1200_sdio_init(void) +{ + const struct cw1200_platform_data_sdio *pdata; + int ret; + + pdata = cw1200_get_platform_data(); + + if (cw1200_sdio_on(pdata)) { + ret = -1; + goto err; + } + + ret = sdio_register_driver(&sdio_driver); + if (ret) + goto err; + + return 0; + +err: + cw1200_sdio_off(pdata); + 
return ret; +} + +/* Called at Driver Unloading */ +static void __exit cw1200_sdio_exit(void) +{ + const struct cw1200_platform_data_sdio *pdata; + pdata = cw1200_get_platform_data(); + sdio_unregister_driver(&sdio_driver); + cw1200_sdio_off(pdata); +} + + +module_init(cw1200_sdio_init); +module_exit(cw1200_sdio_exit); diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c new file mode 100644 index 000000000000..04af68534854 --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -0,0 +1,480 @@ +/* + * Mac80211 SPI driver for ST-Ericsson CW1200 device + * + * Copyright (c) 2011, Sagrad Inc. + * Author: Solomon Peachy + * + * Based on cw1200_sdio.c + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "cw1200.h" +#include "sbus.h" +#include +#include "hwio.h" + +MODULE_AUTHOR("Solomon Peachy "); +MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SPI driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:cw1200_wlan_spi"); + +/* #define SPI_DEBUG */ + +struct sbus_priv { + struct spi_device *func; + struct cw1200_common *core; + const struct cw1200_platform_data_spi *pdata; + spinlock_t lock; /* Serialize all bus operations */ + int claimed; +}; + +#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) +#define SET_WRITE 0x7FFF /* usage: and operation */ +#define SET_READ 0x8000 /* usage: or operation */ + +/* + Notes on byte ordering: + LE: B0 B1 B2 B3 + BE: B3 B2 B1 B0 + + Hardware expects 32-bit data to be written as 16-bit BE words: + + B1 B0 B3 B2 + +*/ + +static int cw1200_spi_memcpy_fromio(struct sbus_priv *self, + unsigned int addr, + void *dst, int count) +{ + int ret, i; + uint16_t regaddr; + struct spi_message m; + + struct spi_transfer t_addr = { + .tx_buf = ®addr, + .len = sizeof(regaddr), + }; + struct spi_transfer t_msg = { + .rx_buf = dst, + .len = count, + }; + + regaddr = (SDIO_TO_SPI_ADDR(addr))<<12; + regaddr |= SET_READ; + regaddr |= (count>>1); + regaddr = cpu_to_le16(regaddr); + +#ifdef SPI_DEBUG + pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr, + le16_to_cpu(regaddr)); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + regaddr = swab16(regaddr); + + spi_message_init(&m); + spi_message_add_tail(&t_addr, &m); + spi_message_add_tail(&t_msg, &m); + ret = spi_sync(self->func, &m); + +#ifdef SPI_DEBUG + pr_info("READ : "); + for (i = 0; i < t_addr.len; i++) + printk("%02x ", ((u8 *)t_addr.tx_buf)[i]); + printk(" : "); + for (i = 0; i < t_msg.len; i++) + printk("%02x ", ((u8 *)t_msg.rx_buf)[i]); + printk("\n"); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + { + uint16_t *buf = (uint16_t *)dst; + for (i = 0; i < ((count + 1) >> 1); i++) + buf[i] = swab16(buf[i]); + } + + return ret; +} + +static int cw1200_spi_memcpy_toio(struct sbus_priv *self, + unsigned int addr, + const void *src, int count) +{ + int rval, i; + uint16_t regaddr; + struct spi_transfer t_addr = { + .tx_buf = ®addr, + .len = sizeof(regaddr), + }; + struct spi_transfer t_msg = { + .tx_buf = src, + .len = count, + }; + struct 
spi_message m; + + regaddr = (SDIO_TO_SPI_ADDR(addr))<<12; + regaddr &= SET_WRITE; + regaddr |= (count>>1); + regaddr = cpu_to_le16(regaddr); + +#ifdef SPI_DEBUG + pr_info("WRITE: %04d to 0x%02x (%04x)\n", count, addr, + le16_to_cpu(regaddr)); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + { + uint16_t *buf = (uint16_t *)src; + regaddr = swab16(regaddr); + for (i = 0; i < ((count + 1) >> 1); i++) + buf[i] = swab16(buf[i]); + } + +#ifdef SPI_DEBUG + pr_info("WRITE: "); + for (i = 0; i < t_addr.len; i++) + printk("%02x ", ((u8 *)t_addr.tx_buf)[i]); + printk(" : "); + for (i = 0; i < t_msg.len; i++) + printk("%02x ", ((u8 *)t_msg.tx_buf)[i]); + printk("\n"); +#endif + + spi_message_init(&m); + spi_message_add_tail(&t_addr, &m); + spi_message_add_tail(&t_msg, &m); + rval = spi_sync(self->func, &m); + +#ifdef SPI_DEBUG + pr_info("WROTE: %d\n", m.actual_length); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + { + uint16_t *buf = (uint16_t *)src; + for (i = 0; i < ((count + 1) >> 1); i++) + buf[i] = swab16(buf[i]); + } + return rval; +} + +static void cw1200_spi_lock(struct sbus_priv *self) +{ + unsigned long flags; + + might_sleep(); + + spin_lock_irqsave(&self->lock, flags); + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (!self->claimed) + break; + spin_unlock_irqrestore(&self->lock, flags); + schedule(); + spin_lock_irqsave(&self->lock, flags); + } + set_current_state(TASK_RUNNING); + self->claimed = 1; + spin_unlock_irqrestore(&self->lock, flags); + + return; +} + +static void cw1200_spi_unlock(struct sbus_priv *self) +{ + unsigned long flags; + + spin_lock_irqsave(&self->lock, flags); + self->claimed = 0; + spin_unlock_irqrestore(&self->lock, flags); + return; +} + +static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id) +{ + struct sbus_priv *self = dev_id; + + if (self->core) { + cw1200_irq_handler(self->core); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } +} + +static int cw1200_spi_irq_subscribe(struct sbus_priv *self) +{ + int ret; + + pr_debug("SW IRQ subscribe\n"); + + ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, + IRQF_TRIGGER_HIGH, + "cw1200_wlan_irq", self); + if (WARN_ON(ret < 0)) + goto exit; + + ret = enable_irq_wake(self->func->irq); + if (WARN_ON(ret)) + goto free_irq; + + return 0; + +free_irq: + free_irq(self->func->irq, self); +exit: + return ret; +} + +static int cw1200_spi_irq_unsubscribe(struct sbus_priv *self) +{ + int ret = 0; + + pr_debug("SW IRQ unsubscribe\n"); + disable_irq_wake(self->func->irq); + free_irq(self->func->irq, self); + + return ret; +} + +static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) +{ + const struct resource *reset = pdata->reset; + + if (reset) { + gpio_set_value(reset->start, 0); + msleep(30); /* Min is 2 * CLK32K cycles */ + gpio_free(reset->start); + } + + if (pdata->power_ctrl) + pdata->power_ctrl(pdata, false); + if (pdata->clk_ctrl) + pdata->clk_ctrl(pdata, false); + + return 0; +} + +static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata) +{ + const struct resource *reset = pdata->reset; + const struct resource *powerup = pdata->reset; + + /* Ensure I/Os are pulled low */ + if (reset) { + gpio_request(reset->start, reset->name); + gpio_direction_output(reset->start, 0); + } + if (powerup) { + gpio_request(powerup->start, 
powerup->name); + gpio_direction_output(powerup->start, 0); + } + if (reset || powerup) + msleep(10); /* Settle time? */ + + /* Enable 3v3 and 1v8 to hardware */ + if (pdata->power_ctrl) { + if (pdata->power_ctrl(pdata, true)) { + pr_err("power_ctrl() failed!\n"); + return -1; + } + } + + /* Enable CLK32K */ + if (pdata->clk_ctrl) { + if (pdata->clk_ctrl(pdata, true)) { + pr_err("clk_ctrl() failed!\n"); + return -1; + } + msleep(10); /* Delay until clock is stable for 2 cycles */ + } + + /* Enable POWERUP signal */ + if (powerup) { + gpio_set_value(powerup->start, 1); + msleep(250); /* or more..? */ + } + /* Enable RSTn signal */ + if (reset) { + gpio_set_value(reset->start, 1); + msleep(50); /* Or more..? */ + } + return 0; +} + +static size_t cw1200_spi_align_size(struct sbus_priv *self, size_t size) +{ + return size & 1 ? size + 1 : size; +} + +static int cw1200_spi_pm(struct sbus_priv *self, bool suspend) +{ + return irq_set_irq_wake(self->func->irq, suspend); +} + +static struct sbus_ops cw1200_spi_sbus_ops = { + .sbus_memcpy_fromio = cw1200_spi_memcpy_fromio, + .sbus_memcpy_toio = cw1200_spi_memcpy_toio, + .lock = cw1200_spi_lock, + .unlock = cw1200_spi_unlock, + .align_size = cw1200_spi_align_size, + .power_mgmt = cw1200_spi_pm, +}; + +/* Probe Function to be called by SPI stack when device is discovered */ +static int cw1200_spi_probe(struct spi_device *func) +{ + const struct cw1200_platform_data_spi *plat_data = + func->dev.platform_data; + struct sbus_priv *self; + int status; + + /* Sanity check speed */ + if (func->max_speed_hz > 52000000) + func->max_speed_hz = 52000000; + if (func->max_speed_hz < 1000000) + func->max_speed_hz = 1000000; + + /* Fix up transfer size */ + if (plat_data->spi_bits_per_word) + func->bits_per_word = plat_data->spi_bits_per_word; + if (!func->bits_per_word) + func->bits_per_word = 16; + + /* And finally.. */ + func->mode = SPI_MODE_0; + + pr_info("cw1200_wlan_spi: Probe called (CS %d M %d BPW %d CLK %d)\n", + func->chip_select, func->mode, func->bits_per_word, + func->max_speed_hz); + + if (cw1200_spi_on(plat_data)) { + pr_err("spi_on() failed!\n"); + return -1; + } + + if (spi_setup(func)) { + pr_err("spi_setup() failed!\n"); + return -1; + } + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + pr_err("Can't allocate SPI sbus_priv."); + return -ENOMEM; + } + + self->pdata = plat_data; + self->func = func; + spin_lock_init(&self->lock); + + spi_set_drvdata(func, self); + + status = cw1200_spi_irq_subscribe(self); + + status = cw1200_core_probe(&cw1200_spi_sbus_ops, + self, &func->dev, &self->core, + self->pdata->ref_clk, + self->pdata->macaddr, + self->pdata->sdd_file, + self->pdata->have_5ghz); + + if (status) { + cw1200_spi_irq_unsubscribe(self); + cw1200_spi_off(plat_data); + kfree(self); + } + + return status; +} + +/* Disconnect Function to be called by SPI stack when device is disconnected */ +static int cw1200_spi_disconnect(struct spi_device *func) +{ + struct sbus_priv *self = spi_get_drvdata(func); + + if (self) { + cw1200_spi_irq_unsubscribe(self); + if (self->core) { + cw1200_core_release(self->core); + self->core = NULL; + } + kfree(self); + } + cw1200_spi_off(func->dev.platform_data); + + return 0; +} + +static int cw1200_spi_suspend(struct device *dev, pm_message_t state) +{ + struct sbus_priv *self = spi_get_drvdata(to_spi_device(dev)); + + if (!cw1200_can_suspend(self->core)) + return -EAGAIN; + + /* XXX notify host that we have to keep CW1200 powered on? 
*/ + return 0; +} + +static int cw1200_spi_resume(struct device *dev) +{ + return 0; +} + +static struct spi_driver spi_driver = { + .probe = cw1200_spi_probe, + .remove = cw1200_spi_disconnect, + .driver = { + .name = "cw1200_wlan_spi", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + .suspend = cw1200_spi_suspend, + .resume = cw1200_spi_resume, + }, +}; + +/* Init Module function -> Called by insmod */ +static int __init cw1200_spi_init(void) +{ + return spi_register_driver(&spi_driver); +} + +/* Called at Driver Unloading */ +static void __exit cw1200_spi_exit(void) +{ + spi_unregister_driver(&spi_driver); +} + +module_init(cw1200_spi_init); +module_exit(cw1200_spi_exit); diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c new file mode 100644 index 000000000000..b815181802e0 --- /dev/null +++ b/drivers/net/wireless/cw1200/debug.c @@ -0,0 +1,664 @@ +/* + * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers + * DebugFS code + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include "cw1200.h" +#include "debug.h" +#include "fwio.h" + +/* join_status */ +static const char * const cw1200_debug_join_status[] = { + "passive", + "monitor", + "station (joining)", + "station (not authenticated yet)", + "station", + "adhoc", + "access point", +}; + +/* WSM_JOIN_PREAMBLE_... */ +static const char * const cw1200_debug_preamble[] = { + "long", + "short", + "long on 1 and 2 Mbps", +}; + + +static const char * const cw1200_debug_link_id[] = { + "OFF", + "REQ", + "SOFT", + "HARD", +}; + +static const char *cw1200_debug_mode(int mode) +{ + switch (mode) { + case NL80211_IFTYPE_UNSPECIFIED: + return "unspecified"; + case NL80211_IFTYPE_MONITOR: + return "monitor"; + case NL80211_IFTYPE_STATION: + return "station"; + case NL80211_IFTYPE_ADHOC: + return "adhoc"; + case NL80211_IFTYPE_MESH_POINT: + return "mesh point"; + case NL80211_IFTYPE_AP: + return "access point"; + case NL80211_IFTYPE_P2P_CLIENT: + return "p2p client"; + case NL80211_IFTYPE_P2P_GO: + return "p2p go"; + default: + return "unsupported"; + } +} + +static void cw1200_queue_status_show(struct seq_file *seq, + struct cw1200_queue *q) +{ + int i; + seq_printf(seq, "Queue %d:\n", q->queue_id); + seq_printf(seq, " capacity: %zu\n", q->capacity); + seq_printf(seq, " queued: %zu\n", q->num_queued); + seq_printf(seq, " pending: %zu\n", q->num_pending); + seq_printf(seq, " sent: %zu\n", q->num_sent); + seq_printf(seq, " locked: %s\n", q->tx_locked_cnt ? "yes" : "no"); + seq_printf(seq, " overfull: %s\n", q->overfull ? "yes" : "no"); + seq_puts(seq, " link map: 0-> "); + for (i = 0; i < q->stats->map_capacity; ++i) + seq_printf(seq, "%.2d ", q->link_map_cache[i]); + seq_printf(seq, "<-%zu\n", q->stats->map_capacity); +} + +static void cw1200_debug_print_map(struct seq_file *seq, + struct cw1200_common *priv, + const char *label, + u32 map) +{ + int i; + seq_printf(seq, "%s0-> ", label); + for (i = 0; i < priv->tx_queue_stats.map_capacity; ++i) + seq_printf(seq, "%s ", (map & BIT(i)) ? 
"**" : ".."); + seq_printf(seq, "<-%zu\n", priv->tx_queue_stats.map_capacity - 1); +} + +static int cw1200_status_show(struct seq_file *seq, void *v) +{ + int i; + struct list_head *item; + struct cw1200_common *priv = seq->private; + struct cw1200_debug_priv *d = priv->debug; + + seq_puts(seq, "CW1200 Wireless LAN driver status\n"); + seq_printf(seq, "Hardware: %d.%d\n", + priv->wsm_caps.hw_id, + priv->wsm_caps.hw_subid); + seq_printf(seq, "Firmware: %s %d.%d\n", + cw1200_fw_types[priv->wsm_caps.fw_type], + priv->wsm_caps.fw_ver, + priv->wsm_caps.fw_build); + seq_printf(seq, "FW API: %d\n", + priv->wsm_caps.fw_api); + seq_printf(seq, "FW caps: 0x%.4X\n", + priv->wsm_caps.fw_cap); + seq_printf(seq, "FW label: '%s'\n", + priv->wsm_caps.fw_label); + seq_printf(seq, "Mode: %s%s\n", + cw1200_debug_mode(priv->mode), + priv->listening ? " (listening)" : ""); + seq_printf(seq, "Join state: %s\n", + cw1200_debug_join_status[priv->join_status]); + if (priv->channel) + seq_printf(seq, "Channel: %d%s\n", + priv->channel->hw_value, + priv->channel_switch_in_progress ? + " (switching)" : ""); + if (priv->rx_filter.promiscuous) + seq_puts(seq, "Filter: promisc\n"); + else if (priv->rx_filter.fcs) + seq_puts(seq, "Filter: fcs\n"); + if (priv->rx_filter.bssid) + seq_puts(seq, "Filter: bssid\n"); + if (!priv->disable_beacon_filter) + seq_puts(seq, "Filter: beacons\n"); + + if (priv->enable_beacon || + priv->mode == NL80211_IFTYPE_AP || + priv->mode == NL80211_IFTYPE_ADHOC || + priv->mode == NL80211_IFTYPE_MESH_POINT || + priv->mode == NL80211_IFTYPE_P2P_GO) + seq_printf(seq, "Beaconing: %s\n", + priv->enable_beacon ? + "enabled" : "disabled"); + + for (i = 0; i < 4; ++i) + seq_printf(seq, "EDCA(%d): %d, %d, %d, %d, %d\n", i, + priv->edca.params[i].cwmin, + priv->edca.params[i].cwmax, + priv->edca.params[i].aifns, + priv->edca.params[i].txop_limit, + priv->edca.params[i].max_rx_lifetime); + + if (priv->join_status == CW1200_JOIN_STATUS_STA) { + static const char *pm_mode = "unknown"; + switch (priv->powersave_mode.mode) { + case WSM_PSM_ACTIVE: + pm_mode = "off"; + break; + case WSM_PSM_PS: + pm_mode = "on"; + break; + case WSM_PSM_FAST_PS: + pm_mode = "dynamic"; + break; + } + seq_printf(seq, "Preamble: %s\n", + cw1200_debug_preamble[priv->association_mode.preamble]); + seq_printf(seq, "AMPDU spcn: %d\n", + priv->association_mode.mpdu_start_spacing); + seq_printf(seq, "Basic rate: 0x%.8X\n", + le32_to_cpu(priv->association_mode.basic_rate_set)); + seq_printf(seq, "Bss lost: %d beacons\n", + priv->bss_params.beacon_lost_count); + seq_printf(seq, "AID: %d\n", + priv->bss_params.aid); + seq_printf(seq, "Rates: 0x%.8X\n", + priv->bss_params.operational_rate_set); + seq_printf(seq, "Powersave: %s\n", pm_mode); + } + seq_printf(seq, "HT: %s\n", + cw1200_is_ht(&priv->ht_info) ? "on" : "off"); + if (cw1200_is_ht(&priv->ht_info)) { + seq_printf(seq, "Greenfield: %s\n", + cw1200_ht_greenfield(&priv->ht_info) ? 
"yes" : "no"); + seq_printf(seq, "AMPDU dens: %d\n", + cw1200_ht_ampdu_density(&priv->ht_info)); + } + seq_printf(seq, "RSSI thold: %d\n", + priv->cqm_rssi_thold); + seq_printf(seq, "RSSI hyst: %d\n", + priv->cqm_rssi_hyst); + seq_printf(seq, "Long retr: %d\n", + priv->long_frame_max_tx_count); + seq_printf(seq, "Short retr: %d\n", + priv->short_frame_max_tx_count); + spin_lock_bh(&priv->tx_policy_cache.lock); + i = 0; + list_for_each(item, &priv->tx_policy_cache.used) + ++i; + spin_unlock_bh(&priv->tx_policy_cache.lock); + seq_printf(seq, "RC in use: %d\n", i); + + seq_puts(seq, "\n"); + for (i = 0; i < 4; ++i) { + cw1200_queue_status_show(seq, &priv->tx_queue[i]); + seq_puts(seq, "\n"); + } + + cw1200_debug_print_map(seq, priv, "Link map: ", + priv->link_id_map); + cw1200_debug_print_map(seq, priv, "Asleep map: ", + priv->sta_asleep_mask); + cw1200_debug_print_map(seq, priv, "PSPOLL map: ", + priv->pspoll_mask); + + seq_puts(seq, "\n"); + + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + if (priv->link_id_db[i].status) { + seq_printf(seq, "Link %d: %s, %pM\n", + i + 1, + cw1200_debug_link_id[priv->link_id_db[i].status], + priv->link_id_db[i].mac); + } + } + + seq_puts(seq, "\n"); + + seq_printf(seq, "BH status: %s\n", + atomic_read(&priv->bh_term) ? "terminated" : "alive"); + seq_printf(seq, "Pending RX: %d\n", + atomic_read(&priv->bh_rx)); + seq_printf(seq, "Pending TX: %d\n", + atomic_read(&priv->bh_tx)); + if (priv->bh_error) + seq_printf(seq, "BH errcode: %d\n", + priv->bh_error); + seq_printf(seq, "TX bufs: %d x %d bytes\n", + priv->wsm_caps.input_buffers, + priv->wsm_caps.input_buffer_size); + seq_printf(seq, "Used bufs: %d\n", + priv->hw_bufs_used); + seq_printf(seq, "Powermgmt: %s\n", + priv->powersave_enabled ? "on" : "off"); + seq_printf(seq, "Device: %s\n", + priv->device_can_sleep ? "asleep" : "awake"); + + spin_lock(&priv->wsm_cmd.lock); + seq_printf(seq, "WSM status: %s\n", + priv->wsm_cmd.done ? "idle" : "active"); + seq_printf(seq, "WSM cmd: 0x%.4X (%td bytes)\n", + priv->wsm_cmd.cmd, priv->wsm_cmd.len); + seq_printf(seq, "WSM retval: %d\n", + priv->wsm_cmd.ret); + spin_unlock(&priv->wsm_cmd.lock); + + seq_printf(seq, "Datapath: %s\n", + atomic_read(&priv->tx_lock) ? "locked" : "unlocked"); + if (atomic_read(&priv->tx_lock)) + seq_printf(seq, "TXlock cnt: %d\n", + atomic_read(&priv->tx_lock)); + + seq_printf(seq, "TXed: %d\n", + d->tx); + seq_printf(seq, "AGG TXed: %d\n", + d->tx_agg); + seq_printf(seq, "MULTI TXed: %d (%d)\n", + d->tx_multi, d->tx_multi_frames); + seq_printf(seq, "RXed: %d\n", + d->rx); + seq_printf(seq, "AGG RXed: %d\n", + d->rx_agg); + seq_printf(seq, "TX miss: %d\n", + d->tx_cache_miss); + seq_printf(seq, "TX align: %d\n", + d->tx_align); + seq_printf(seq, "TX burst: %d\n", + d->tx_burst); + seq_printf(seq, "TX TTL: %d\n", + d->tx_ttl); + seq_printf(seq, "Scan: %s\n", + atomic_read(&priv->scan.in_progress) ? 
"active" : "idle"); + + return 0; +} + +static int cw1200_status_open(struct inode *inode, struct file *file) +{ + return single_open(file, &cw1200_status_show, + inode->i_private); +} + +static const struct file_operations fops_status = { + .open = cw1200_status_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int cw1200_counters_show(struct seq_file *seq, void *v) +{ + int ret; + struct cw1200_common *priv = seq->private; + struct wsm_mib_counters_table counters; + + ret = wsm_get_counters_table(priv, &counters); + if (ret) + return ret; + +#define PUT_COUNTER(tab, name) \ + seq_printf(seq, "%s:" tab "%d\n", #name, \ + __le32_to_cpu(counters.name)) + + PUT_COUNTER("\t\t", plcp_errors); + PUT_COUNTER("\t\t", fcs_errors); + PUT_COUNTER("\t\t", tx_packets); + PUT_COUNTER("\t\t", rx_packets); + PUT_COUNTER("\t\t", rx_packet_errors); + PUT_COUNTER("\t", rx_decryption_failures); + PUT_COUNTER("\t\t", rx_mic_failures); + PUT_COUNTER("\t", rx_no_key_failures); + PUT_COUNTER("\t", tx_multicast_frames); + PUT_COUNTER("\t", tx_frames_success); + PUT_COUNTER("\t", tx_frame_failures); + PUT_COUNTER("\t", tx_frames_retried); + PUT_COUNTER("\t", tx_frames_multi_retried); + PUT_COUNTER("\t", rx_frame_duplicates); + PUT_COUNTER("\t\t", rts_success); + PUT_COUNTER("\t\t", rts_failures); + PUT_COUNTER("\t\t", ack_failures); + PUT_COUNTER("\t", rx_multicast_frames); + PUT_COUNTER("\t", rx_frames_success); + PUT_COUNTER("\t", rx_cmac_icv_errors); + PUT_COUNTER("\t\t", rx_cmac_replays); + PUT_COUNTER("\t", rx_mgmt_ccmp_replays); + +#undef PUT_COUNTER + + return 0; +} + +static int cw1200_counters_open(struct inode *inode, struct file *file) +{ + return single_open(file, &cw1200_counters_show, + inode->i_private); +} + +static const struct file_operations fops_counters = { + .open = cw1200_counters_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int cw1200_generic_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#ifdef CONFIG_CW1200_ETF +static int cw1200_etf_out_show(struct seq_file *seq, void *v) +{ + struct cw1200_common *priv = seq->private; + struct sk_buff *skb; + u32 len = 0; + + skb = skb_dequeue(&priv->etf_q); + + if (skb) + len = skb->len; + + seq_write(seq, &len, sizeof(len)); + + if (skb) { + seq_write(seq, skb->data, len); + kfree_skb(skb); + } + + return 0; +} + +static int cw1200_etf_out_open(struct inode *inode, struct file *file) +{ + return single_open(file, &cw1200_etf_out_show, + inode->i_private); +} + +static const struct file_operations fops_etf_out = { + .open = cw1200_etf_out_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +struct etf_req_msg; +static int etf_request(struct cw1200_common *priv, + struct etf_req_msg *msg, u32 len); + +#define MAX_RX_SZE 2600 + +struct etf_in_state { + struct cw1200_common *priv; + u32 total_len; + u8 buf[MAX_RX_SZE]; + u32 written; +}; + +static int cw1200_etf_in_open(struct inode *inode, struct file *file) +{ + struct etf_in_state *etf = kmalloc(sizeof(struct etf_in_state), + GFP_KERNEL); + + if (!etf) + return -ENOMEM; + + etf->written = 0; + etf->total_len = 0; + etf->priv = inode->i_private; + + file->private_data = etf; + + return 0; +} + +static int cw1200_etf_in_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static ssize_t 
cw1200_etf_in_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct etf_in_state *etf = file->private_data; + + ssize_t written = 0; + + if (!etf->total_len) { + if (count < sizeof(etf->total_len)) { + pr_err("count < sizeof(total_len)\n"); + return -EINVAL; + } + + if (copy_from_user(&etf->total_len, user_buf, + sizeof(etf->total_len))) { + pr_err("copy_from_user (len) failed\n"); + return -EFAULT; + } + + written += sizeof(etf->total_len); + count -= sizeof(etf->total_len); + } + + if (!count) + goto done; + + if (copy_from_user(etf->buf + etf->written, user_buf + written, + count)) { + pr_err("copy_from_user (payload %zu) failed\n", count); + return -EFAULT; + } + + written += count; + etf->written += count; + + if (etf->written >= etf->total_len) { + if (etf_request(etf->priv, (struct etf_req_msg *)etf->buf, + etf->total_len)) { + pr_err("etf_request failed\n"); + return -EIO; + } + } + +done: + return written; +} + +static const struct file_operations fops_etf_in = { + .open = cw1200_etf_in_open, + .release = cw1200_etf_in_release, + .write = cw1200_etf_in_write, + .llseek = default_llseek, + .owner = THIS_MODULE, +}; +#endif /* CONFIG_CW1200_ETF */ + +static ssize_t cw1200_wsm_dumps(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct cw1200_common *priv = file->private_data; + char buf[1]; + + if (!count) + return -EINVAL; + if (copy_from_user(buf, user_buf, 1)) + return -EFAULT; + + if (buf[0] == '1') + priv->wsm_enable_wsm_dumps = 1; + else + priv->wsm_enable_wsm_dumps = 0; + + return count; +} + +static const struct file_operations fops_wsm_dumps = { + .open = cw1200_generic_open, + .write = cw1200_wsm_dumps, + .llseek = default_llseek, +}; + +int cw1200_debug_init(struct cw1200_common *priv) +{ + int ret = -ENOMEM; + struct cw1200_debug_priv *d = kzalloc(sizeof(struct cw1200_debug_priv), + GFP_KERNEL); + priv->debug = d; + if (!d) + return ret; + + d->debugfs_phy = debugfs_create_dir("cw1200", + priv->hw->wiphy->debugfsdir); + if (!d->debugfs_phy) + goto err; + + if (!debugfs_create_file("status", S_IRUSR, d->debugfs_phy, + priv, &fops_status)) + goto err; + + if (!debugfs_create_file("counters", S_IRUSR, d->debugfs_phy, + priv, &fops_counters)) + goto err; + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) { + skb_queue_head_init(&priv->etf_q); + + if (!debugfs_create_file("etf_out", S_IRUSR, d->debugfs_phy, + priv, &fops_etf_out)) + goto err; + if (!debugfs_create_file("etf_in", S_IWUSR, d->debugfs_phy, + priv, &fops_etf_in)) + goto err; + } +#endif /* CONFIG_CW1200_ETF */ + + if (!debugfs_create_file("wsm_dumps", S_IWUSR, d->debugfs_phy, + priv, &fops_wsm_dumps)) + goto err; + + ret = cw1200_itp_init(priv); + if (ret) + goto err; + + return 0; + +err: + priv->debug = NULL; + debugfs_remove_recursive(d->debugfs_phy); + kfree(d); + return ret; +} + +void cw1200_debug_release(struct cw1200_common *priv) +{ + struct cw1200_debug_priv *d = priv->debug; + if (d) { + cw1200_itp_release(priv); + priv->debug = NULL; + kfree(d); + } +} + +#ifdef CONFIG_CW1200_ETF +struct cw1200_sdd { + u8 id; + u8 len; + u8 data[]; +}; + +struct etf_req_msg { + u32 id; + u32 len; + u8 data[]; +}; + +static int parse_sdd_file(struct cw1200_common *priv, u8 *data, u32 length) +{ + struct cw1200_sdd *ie; + + while (length > 0) { + ie = (struct cw1200_sdd *)data; + if (ie->id == SDD_REFERENCE_FREQUENCY_ELT_ID) { + priv->hw_refclk = cpu_to_le16(*((u16 *)ie->data)); + pr_info("Using Reference clock frequency %d KHz\n", + 
priv->hw_refclk); + break; + } + + length -= ie->len + sizeof(*ie); + data += ie->len + sizeof(*ie); + } + return 0; +} + +char *etf_firmware; + +#define ST90TDS_START_ADAPTER 0x09 /* Loads firmware too */ +#define ST90TDS_STOP_ADAPTER 0x0A +#define ST90TDS_CONFIG_ADAPTER 0x0E /* Send configuration params */ +#define ST90TDS_SBUS_READ 0x13 +#define ST90TDS_SBUS_WRITE 0x14 +#define ST90TDS_GET_DEVICE_OPTION 0x19 +#define ST90TDS_SET_DEVICE_OPTION 0x1A +#define ST90TDS_SEND_SDD 0x1D /* SDD File used to find DPLL */ + +#include "fwio.h" + +static int etf_request(struct cw1200_common *priv, + struct etf_req_msg *msg, + u32 len) +{ + int rval = -1; + switch (msg->id) { + case ST90TDS_START_ADAPTER: + etf_firmware = "cw1200_etf.bin"; + pr_info("ETF_START (len %d, '%s')\n", len, etf_firmware); + rval = cw1200_load_firmware(priv); + break; + case ST90TDS_STOP_ADAPTER: + pr_info("ETF_STOP (unhandled)\n"); + break; + case ST90TDS_SEND_SDD: + pr_info("ETF_SDD\n"); + rval = parse_sdd_file(priv, msg->data, msg->len); + break; + case ST90TDS_CONFIG_ADAPTER: + pr_info("ETF_CONFIG_ADAP (unhandled)\n"); + break; + case ST90TDS_SBUS_READ: + pr_info("ETF_SBUS_READ (unhandled)\n"); + break; + case ST90TDS_SBUS_WRITE: + pr_info("ETF_SBUS_WRITE (unhandled)\n"); + break; + case ST90TDS_SET_DEVICE_OPTION: + pr_info("ETF_SET_DEV_OPT (unhandled)\n"); + break; + default: + pr_info("ETF_PASSTHRU (0x%08x)\n", msg->id); + rval = wsm_raw_cmd(priv, (u8 *)msg, len); + break; + } + + return rval; +} +#endif /* CONFIG_CW1200_ETF */ diff --git a/drivers/net/wireless/cw1200/debug.h b/drivers/net/wireless/cw1200/debug.h new file mode 100644 index 000000000000..1fea5b29a819 --- /dev/null +++ b/drivers/net/wireless/cw1200/debug.h @@ -0,0 +1,98 @@ +/* + * DebugFS code for ST-Ericsson CW1200 mac80211 driver + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef CW1200_DEBUG_H_INCLUDED +#define CW1200_DEBUG_H_INCLUDED + +#include "itp.h" + +struct cw1200_debug_priv { + struct dentry *debugfs_phy; + int tx; + int tx_agg; + int rx; + int rx_agg; + int tx_multi; + int tx_multi_frames; + int tx_cache_miss; + int tx_align; + int tx_ttl; + int tx_burst; + int ba_cnt; + int ba_acc; + int ba_cnt_rx; + int ba_acc_rx; +#ifdef CONFIG_CW1200_ITP + struct cw1200_itp itp; +#endif /* CONFIG_CW1200_ITP */ +}; + +int cw1200_debug_init(struct cw1200_common *priv); +void cw1200_debug_release(struct cw1200_common *priv); + +static inline void cw1200_debug_txed(struct cw1200_common *priv) +{ + ++priv->debug->tx; +} + +static inline void cw1200_debug_txed_agg(struct cw1200_common *priv) +{ + ++priv->debug->tx_agg; +} + +static inline void cw1200_debug_txed_multi(struct cw1200_common *priv, + int count) +{ + ++priv->debug->tx_multi; + priv->debug->tx_multi_frames += count; +} + +static inline void cw1200_debug_rxed(struct cw1200_common *priv) +{ + ++priv->debug->rx; +} + +static inline void cw1200_debug_rxed_agg(struct cw1200_common *priv) +{ + ++priv->debug->rx_agg; +} + +static inline void cw1200_debug_tx_cache_miss(struct cw1200_common *priv) +{ + ++priv->debug->tx_cache_miss; +} + +static inline void cw1200_debug_tx_align(struct cw1200_common *priv) +{ + ++priv->debug->tx_align; +} + +static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv) +{ + ++priv->debug->tx_ttl; +} + +static inline void cw1200_debug_tx_burst(struct cw1200_common *priv) +{ + ++priv->debug->tx_burst; +} + +static inline void cw1200_debug_ba(struct cw1200_common *priv, + int ba_cnt, int ba_acc, + int ba_cnt_rx, int ba_acc_rx) +{ + priv->debug->ba_cnt = ba_cnt; + priv->debug->ba_acc = ba_acc; + priv->debug->ba_cnt_rx = ba_cnt_rx; + priv->debug->ba_acc_rx = ba_acc_rx; +} + +#endif /* CW1200_DEBUG_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c new file mode 100644 index 000000000000..ad01cd2a59ec --- /dev/null +++ b/drivers/net/wireless/cw1200/fwio.c @@ -0,0 +1,525 @@ +/* + * Firmware I/O code for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * Based on: + * ST-Ericsson UMAC CW1200 driver which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include "cw1200.h" +#include "fwio.h" +#include "hwio.h" +#include "sbus.h" +#include "bh.h" + +static int cw1200_get_hw_type(u32 config_reg_val, int *major_revision) +{ + int hw_type = -1; + u32 silicon_type = (config_reg_val >> 24) & 0x7; + u32 silicon_vers = (config_reg_val >> 31) & 0x1; + + switch (silicon_type) { + case 0x00: + *major_revision = 1; + hw_type = HIF_9000_SILICON_VERSATILE; + break; + case 0x01: + case 0x02: /* CW1x00 */ + case 0x04: /* CW1x60 */ + *major_revision = silicon_type; + if (silicon_vers) + hw_type = HIF_8601_VERSATILE; + else + hw_type = HIF_8601_SILICON; + break; + default: + break; + } + + return hw_type; +} + +static int cw1200_load_firmware_cw1200(struct cw1200_common *priv) +{ + int ret, block, num_blocks; + unsigned i; + u32 val32; + u32 put = 0, get = 0; + u8 *buf = NULL; + const char *fw_path; + const struct firmware *firmware = NULL; + + /* Macroses are local. 
*/ +#define APB_WRITE(reg, val) \ + do { \ + ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \ + if (ret < 0) \ + goto error; \ + } while (0) +#define APB_READ(reg, val) \ + do { \ + ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \ + if (ret < 0) \ + goto error; \ + } while (0) +#define REG_WRITE(reg, val) \ + do { \ + ret = cw1200_reg_write_32(priv, (reg), (val)); \ + if (ret < 0) \ + goto error; \ + } while (0) +#define REG_READ(reg, val) \ + do { \ + ret = cw1200_reg_read_32(priv, (reg), &(val)); \ + if (ret < 0) \ + goto error; \ + } while (0) + + switch (priv->hw_revision) { + case CW1200_HW_REV_CUT10: + fw_path = FIRMWARE_CUT10; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_10; + break; + case CW1200_HW_REV_CUT11: + fw_path = FIRMWARE_CUT11; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_11; + break; + case CW1200_HW_REV_CUT20: + fw_path = FIRMWARE_CUT20; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_20; + break; + case CW1200_HW_REV_CUT22: + fw_path = FIRMWARE_CUT22; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_22; + break; + case CW1X60_HW_REV: + fw_path = FIRMWARE_CW1X60; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_CW1X60; + break; + default: + pr_err("Invalid silicon revision %d.\n", priv->hw_revision); + return -EINVAL; + } + + /* Initialize common registers */ + APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, DOWNLOAD_ARE_YOU_HERE); + APB_WRITE(DOWNLOAD_PUT_REG, 0); + APB_WRITE(DOWNLOAD_GET_REG, 0); + APB_WRITE(DOWNLOAD_STATUS_REG, DOWNLOAD_PENDING); + APB_WRITE(DOWNLOAD_FLAGS_REG, 0); + + /* Write the NOP Instruction */ + REG_WRITE(ST90TDS_SRAM_BASE_ADDR_REG_ID, 0xFFF20000); + REG_WRITE(ST90TDS_AHB_DPORT_REG_ID, 0xEAFFFFFE); + + /* Release CPU from RESET */ + REG_READ(ST90TDS_CONFIG_REG_ID, val32); + val32 &= ~ST90TDS_CONFIG_CPU_RESET_BIT; + REG_WRITE(ST90TDS_CONFIG_REG_ID, val32); + + /* Enable Clock */ + val32 &= ~ST90TDS_CONFIG_CPU_CLK_DIS_BIT; + REG_WRITE(ST90TDS_CONFIG_REG_ID, val32); + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) + fw_path = etf_firmware; +#endif + + /* Load a firmware file */ + ret = request_firmware(&firmware, fw_path, priv->pdev); + if (ret) { + pr_err("Can't load firmware file %s.\n", fw_path); + goto error; + } + + buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); + if (!buf) { + pr_err("Can't allocate firmware load buffer.\n"); + ret = -ENOMEM; + goto error; + } + + /* Check if the bootloader is ready */ + for (i = 0; i < 100; i += 1 + i / 2) { + APB_READ(DOWNLOAD_IMAGE_SIZE_REG, val32); + if (val32 == DOWNLOAD_I_AM_HERE) + break; + mdelay(i); + } /* End of for loop */ + + if (val32 != DOWNLOAD_I_AM_HERE) { + pr_err("Bootloader is not ready.\n"); + ret = -ETIMEDOUT; + goto error; + } + + /* Calculate number of download blocks */ + num_blocks = (firmware->size - 1) / DOWNLOAD_BLOCK_SIZE + 1; + + /* Updating the length in Download Ctrl Area */ + val32 = firmware->size; /* Explicit cast from size_t to u32 */ + APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, val32); + + /* Firmware downloading loop */ + for (block = 0; block < num_blocks; block++) { + size_t tx_size; + size_t block_size; + + /* check the download status */ + APB_READ(DOWNLOAD_STATUS_REG, val32); + if (val32 != DOWNLOAD_PENDING) { + pr_err("Bootloader reported error %d.\n", val32); + ret = -EIO; + goto error; + } + + /* loop until put - get <= 24K */ + for (i = 0; i < 100; i++) { + APB_READ(DOWNLOAD_GET_REG, get); + if ((put - get) <= + (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) + break; + mdelay(i); + } + + if ((put - get) > (DOWNLOAD_FIFO_SIZE - 
DOWNLOAD_BLOCK_SIZE)) { + pr_err("Timeout waiting for FIFO.\n"); + ret = -ETIMEDOUT; + goto error; + } + + /* calculate the block size */ + tx_size = block_size = min((size_t)(firmware->size - put), + (size_t)DOWNLOAD_BLOCK_SIZE); + + memcpy(buf, &firmware->data[put], block_size); + if (block_size < DOWNLOAD_BLOCK_SIZE) { + memset(&buf[block_size], 0, + DOWNLOAD_BLOCK_SIZE - block_size); + tx_size = DOWNLOAD_BLOCK_SIZE; + } + + /* send the block to sram */ + ret = cw1200_apb_write(priv, + CW1200_APB(DOWNLOAD_FIFO_OFFSET + + (put & (DOWNLOAD_FIFO_SIZE - 1))), + buf, tx_size); + if (ret < 0) { + pr_err("Can't write firmware block @ %d!\n", + put & (DOWNLOAD_FIFO_SIZE - 1)); + goto error; + } + + /* update the put register */ + put += block_size; + APB_WRITE(DOWNLOAD_PUT_REG, put); + } /* End of firmware download loop */ + + /* Wait for the download completion */ + for (i = 0; i < 300; i += 1 + i / 2) { + APB_READ(DOWNLOAD_STATUS_REG, val32); + if (val32 != DOWNLOAD_PENDING) + break; + mdelay(i); + } + if (val32 != DOWNLOAD_SUCCESS) { + pr_err("Wait for download completion failed: 0x%.8X\n", val32); + ret = -ETIMEDOUT; + goto error; + } else { + pr_info("Firmware download completed.\n"); + ret = 0; + } + +error: + kfree(buf); + if (firmware) + release_firmware(firmware); + return ret; + +#undef APB_WRITE +#undef APB_READ +#undef REG_WRITE +#undef REG_READ +} + + +static int config_reg_read(struct cw1200_common *priv, u32 *val) +{ + switch (priv->hw_type) { + case HIF_9000_SILICON_VERSATILE: { + u16 val16; + int ret = cw1200_reg_read_16(priv, + ST90TDS_CONFIG_REG_ID, + &val16); + if (ret < 0) + return ret; + *val = val16; + return 0; + } + case HIF_8601_VERSATILE: + case HIF_8601_SILICON: + default: + cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, val); + break; + } + return 0; +} + +static int config_reg_write(struct cw1200_common *priv, u32 val) +{ + switch (priv->hw_type) { + case HIF_9000_SILICON_VERSATILE: + return cw1200_reg_write_16(priv, + ST90TDS_CONFIG_REG_ID, + (u16)val); + case HIF_8601_VERSATILE: + case HIF_8601_SILICON: + default: + return cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val); + break; + } + return 0; +} + +int cw1200_load_firmware(struct cw1200_common *priv) +{ + int ret; + int i; + u32 val32; + u16 val16; + int major_revision = -1; + + /* Read CONFIG Register */ + ret = cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto out; + } + + if (val32 == 0 || val32 == 0xffffffff) { + pr_err("Bad config register value (0x%08x)\n", val32); + ret = -EIO; + goto out; + } + + priv->hw_type = cw1200_get_hw_type(val32, &major_revision); + if (priv->hw_type < 0) { + pr_err("Can't deduce hardware type.\n"); + ret = -ENOTSUPP; + goto out; + } + + /* Set DPLL Reg value, and read back to confirm writes work */ + ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, + cw1200_dpll_from_clk(priv->hw_refclk)); + if (ret < 0) { + pr_err("Can't write DPLL register.\n"); + goto out; + } + + msleep(20); + + ret = cw1200_reg_read_32(priv, + ST90TDS_TSET_GEN_R_W_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read DPLL register.\n"); + goto out; + } + + if (val32 != cw1200_dpll_from_clk(priv->hw_refclk)) { + pr_err("Unable to initialise DPLL register. 
Wrote 0x%.8X, Read 0x%.8X.\n", + cw1200_dpll_from_clk(priv->hw_refclk), val32); + ret = -EIO; + goto out; + } + + /* Set wakeup bit in device */ + ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, &val16); + if (ret < 0) { + pr_err("set_wakeup: can't read control register.\n"); + goto out; + } + + ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, + val16 | ST90TDS_CONT_WUP_BIT); + if (ret < 0) { + pr_err("set_wakeup: can't write control register.\n"); + goto out; + } + + /* Wait for wakeup */ + for (i = 0; i < 300; i += (1 + i / 2)) { + ret = cw1200_reg_read_16(priv, + ST90TDS_CONTROL_REG_ID, &val16); + if (ret < 0) { + pr_err("wait_for_wakeup: can't read control register.\n"); + goto out; + } + + if (val16 & ST90TDS_CONT_RDY_BIT) + break; + + msleep(i); + } + + if ((val16 & ST90TDS_CONT_RDY_BIT) == 0) { + pr_err("wait_for_wakeup: device is not responding.\n"); + ret = -ETIMEDOUT; + goto out; + } + + switch (major_revision) { + case 1: + /* CW1200 Hardware detection logic: Check for CUT1.1 */ + ret = cw1200_ahb_read_32(priv, CW1200_CUT_ID_ADDR, &val32); + if (ret) { + pr_err("HW detection: can't read CUT ID.\n"); + goto out; + } + + switch (val32) { + case CW1200_CUT_11_ID_STR: + pr_info("CW1x00 Cut 1.1 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT11; + break; + default: + pr_info("CW1x00 Cut 1.0 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT10; + break; + } + + /* According to ST-E, CUT<2.0 has busted BA TID0-3. + Just disable it entirely... + */ + priv->ba_rx_tid_mask = 0; + priv->ba_tx_tid_mask = 0; + break; + case 2: { + u32 ar1, ar2, ar3; + ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR, &ar1); + if (ret) { + pr_err("(1) HW detection: can't read CUT ID\n"); + goto out; + } + ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 4, &ar2); + if (ret) { + pr_err("(2) HW detection: can't read CUT ID.\n"); + goto out; + } + + ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 8, &ar3); + if (ret) { + pr_err("(3) HW detection: can't read CUT ID.\n"); + goto out; + } + + if (ar1 == CW1200_CUT_22_ID_STR1 && + ar2 == CW1200_CUT_22_ID_STR2 && + ar3 == CW1200_CUT_22_ID_STR3) { + pr_info("CW1x00 Cut 2.2 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT22; + } else { + pr_info("CW1x00 Cut 2.0 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT20; + } + break; + } + case 4: + pr_info("CW1x60 silicon detected.\n"); + priv->hw_revision = CW1X60_HW_REV; + break; + default: + pr_err("Unsupported silicon major revision %d.\n", + major_revision); + ret = -ENOTSUPP; + goto out; + } + + /* Checking for access mode */ + ret = config_reg_read(priv, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto out; + } + + if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) { + pr_err("Device is already in QUEUE mode!\n"); + ret = -EINVAL; + goto out; + } + + switch (priv->hw_type) { + case HIF_8601_SILICON: + if (priv->hw_revision == CW1X60_HW_REV) { + pr_err("Can't handle CW1160/1260 firmware load yet.\n"); + ret = -ENOTSUPP; + goto out; + } + ret = cw1200_load_firmware_cw1200(priv); + break; + default: + pr_err("Can't perform firmware load for hw type %d.\n", + priv->hw_type); + ret = -ENOTSUPP; + goto out; + } + if (ret < 0) { + pr_err("Firmware load error.\n"); + goto out; + } + + /* Enable interrupt signalling */ + priv->sbus_ops->lock(priv->sbus_priv); + ret = __cw1200_irq_enable(priv, 1); + priv->sbus_ops->unlock(priv->sbus_priv); + if (ret < 0) + goto unsubscribe; + + /* Configure device for MESSAGE MODE */ + ret = 
config_reg_read(priv, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto unsubscribe; + } + ret = config_reg_write(priv, val32 & ~ST90TDS_CONFIG_ACCESS_MODE_BIT); + if (ret < 0) { + pr_err("Can't write config register.\n"); + goto unsubscribe; + } + + /* Unless we read the CONFIG Register we are + * not able to get an interrupt + */ + mdelay(10); + config_reg_read(priv, &val32); + +out: + return ret; + +unsubscribe: + /* Disable interrupt signalling */ + priv->sbus_ops->lock(priv->sbus_priv); + ret = __cw1200_irq_enable(priv, 0); + priv->sbus_ops->unlock(priv->sbus_priv); + return ret; +} diff --git a/drivers/net/wireless/cw1200/fwio.h b/drivers/net/wireless/cw1200/fwio.h new file mode 100644 index 000000000000..ea3099362cdf --- /dev/null +++ b/drivers/net/wireless/cw1200/fwio.h @@ -0,0 +1,39 @@ +/* + * Firmware API for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * Based on: + * ST-Ericsson UMAC CW1200 driver which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef FWIO_H_INCLUDED +#define FWIO_H_INCLUDED + +#define BOOTLOADER_CW1X60 "boot_cw1x60.bin" +#define FIRMWARE_CW1X60 "wsm_cw1x60.bin" +#define FIRMWARE_CUT22 "wsm_22.bin" +#define FIRMWARE_CUT20 "wsm_20.bin" +#define FIRMWARE_CUT11 "wsm_11.bin" +#define FIRMWARE_CUT10 "wsm_10.bin" +#define SDD_FILE_CW1X60 "sdd_cw1x60.bin" +#define SDD_FILE_22 "sdd_22.bin" +#define SDD_FILE_20 "sdd_20.bin" +#define SDD_FILE_11 "sdd_11.bin" +#define SDD_FILE_10 "sdd_10.bin" + +int cw1200_load_firmware(struct cw1200_common *priv); + +/* SDD definitions */ +#define SDD_PTA_CFG_ELT_ID 0xEB +#define SDD_REFERENCE_FREQUENCY_ELT_ID 0xc5 +u32 cw1200_dpll_from_clk(u16 clk); + +#endif diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c new file mode 100644 index 000000000000..1af7b3d421b2 --- /dev/null +++ b/drivers/net/wireless/cw1200/hwio.c @@ -0,0 +1,311 @@ +/* + * Low-level device IO routines for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * Based on: + * ST-Ericsson UMAC CW1200 driver, which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include + +#include "cw1200.h" +#include "hwio.h" +#include "sbus.h" + + /* Sdio addr is 4*spi_addr */ +#define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2) +#define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \ + ((((buf_id) & 0x1F) << 7) \ + | (((mpf) & 1) << 6) \ + | (((rfu) & 1) << 5) \ + | (((reg_id_ofs) & 0x1F) << 0)) +#define MAX_RETRY 3 + + +static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr, + void *buf, size_t buf_len, int buf_id) +{ + u16 addr_sdio; + u32 sdio_reg_addr_17bit; + + /* Check if buffer is aligned to 4 byte boundary */ + if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) { + pr_err("buffer is not aligned.\n"); + return -EINVAL; + } + + /* Convert to SDIO Register Address */ + addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); + sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); + + return priv->sbus_ops->sbus_memcpy_fromio(priv->sbus_priv, + sdio_reg_addr_17bit, + buf, buf_len); +} + +static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr, + const void *buf, size_t buf_len, int buf_id) +{ + u16 addr_sdio; + u32 sdio_reg_addr_17bit; + + /* Convert to SDIO Register Address */ + addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); + sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); + + return priv->sbus_ops->sbus_memcpy_toio(priv->sbus_priv, + sdio_reg_addr_17bit, + buf, buf_len); +} + +static inline int __cw1200_reg_read_32(struct cw1200_common *priv, + u16 addr, u32 *val) +{ + int i = __cw1200_reg_read(priv, addr, val, sizeof(*val), 0); + *val = le32_to_cpu(*val); + return i; +} + +static inline int __cw1200_reg_write_32(struct cw1200_common *priv, + u16 addr, u32 val) +{ + val = cpu_to_le32(val); + return __cw1200_reg_write(priv, addr, &val, sizeof(val), 0); +} + +static inline int __cw1200_reg_read_16(struct cw1200_common *priv, + u16 addr, u16 *val) +{ + int i = __cw1200_reg_read(priv, addr, val, sizeof(*val), 0); + *val = le16_to_cpu(*val); + return i; +} + +static inline int __cw1200_reg_write_16(struct cw1200_common *priv, + u16 addr, u16 val) +{ + val = cpu_to_le16(val); + return __cw1200_reg_write(priv, addr, &val, sizeof(val), 0); +} + +int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf, + size_t buf_len) +{ + int ret; + priv->sbus_ops->lock(priv->sbus_priv); + ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0); + priv->sbus_ops->unlock(priv->sbus_priv); + return ret; +} + +int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf, + size_t buf_len) +{ + int ret; + priv->sbus_ops->lock(priv->sbus_priv); + ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0); + priv->sbus_ops->unlock(priv->sbus_priv); + return ret; +} + +int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len) +{ + int ret, retry = 1; + int buf_id_rx = priv->buf_id_rx; + + priv->sbus_ops->lock(priv->sbus_priv); + + while (retry <= MAX_RETRY) { + ret = __cw1200_reg_read(priv, + ST90TDS_IN_OUT_QUEUE_REG_ID, buf, + buf_len, buf_id_rx + 1); + if (!ret) { + buf_id_rx = (buf_id_rx + 1) & 3; + priv->buf_id_rx = buf_id_rx; + break; + } else { + retry++; + mdelay(1); + pr_err("error :[%d]\n", ret); + } + } + + priv->sbus_ops->unlock(priv->sbus_priv); + return ret; +} + +int cw1200_data_write(struct cw1200_common *priv, const void *buf, + size_t buf_len) +{ + int ret, retry = 1; + int buf_id_tx = priv->buf_id_tx; + + priv->sbus_ops->lock(priv->sbus_priv); + + while (retry <= MAX_RETRY) { + ret = __cw1200_reg_write(priv, + ST90TDS_IN_OUT_QUEUE_REG_ID, buf, + buf_len, buf_id_tx); + if (!ret) { + 
buf_id_tx = (buf_id_tx + 1) & 31; + priv->buf_id_tx = buf_id_tx; + break; + } else { + retry++; + mdelay(1); + pr_err("error :[%d]\n", ret); + } + } + + priv->sbus_ops->unlock(priv->sbus_priv); + return ret; +} + +int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf, + size_t buf_len, u32 prefetch, u16 port_addr) +{ + u32 val32 = 0; + int i, ret; + + if ((buf_len / 2) >= 0x1000) { + pr_err("Can't read more than 0xfff words.\n"); + return -EINVAL; + goto out; + } + + priv->sbus_ops->lock(priv->sbus_priv); + /* Write address */ + ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); + if (ret < 0) { + pr_err("Can't write address register.\n"); + goto out; + } + + /* Read CONFIG Register Value - We will read 32 bits */ + ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto out; + } + + /* Set PREFETCH bit */ + ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, + val32 | prefetch); + if (ret < 0) { + pr_err("Can't write prefetch bit.\n"); + goto out; + } + + /* Check for PRE-FETCH bit to be cleared */ + for (i = 0; i < 20; i++) { + ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't check prefetch bit.\n"); + goto out; + } + if (!(val32 & prefetch)) + break; + + mdelay(i); + } + + if (val32 & prefetch) { + pr_err("Prefetch bit is not cleared.\n"); + goto out; + } + + /* Read data port */ + ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0); + if (ret < 0) { + pr_err("Can't read data port.\n"); + goto out; + } + +out: + priv->sbus_ops->unlock(priv->sbus_priv); + return ret; +} + +int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf, + size_t buf_len) +{ + int ret; + + if ((buf_len / 2) >= 0x1000) { + pr_err("Can't write more than 0xfff words.\n"); + return -EINVAL; + } + + priv->sbus_ops->lock(priv->sbus_priv); + + /* Write address */ + ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); + if (ret < 0) { + pr_err("Can't write address register.\n"); + goto out; + } + + /* Write data port */ + ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID, + buf, buf_len, 0); + if (ret < 0) { + pr_err("Can't write data port.\n"); + goto out; + } + +out: + priv->sbus_ops->unlock(priv->sbus_priv); + return ret; +} + +int __cw1200_irq_enable(struct cw1200_common *priv, int enable) +{ + u32 val32; + u16 val16; + int ret; + + if (HIF_8601_SILICON == priv->hw_type) { + ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + return ret; + } + + if (enable) + val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE; + else + val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE; + + ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32); + if (ret < 0) { + pr_err("Can't write config register.\n"); + return ret; + } + } else { + ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16); + if (ret < 0) { + pr_err("Can't read control register.\n"); + return ret; + } + + if (enable) + val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE; + else + val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE; + + ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16); + if (ret < 0) { + pr_err("Can't write control register.\n"); + return ret; + } + } + return 0; +} diff --git a/drivers/net/wireless/cw1200/hwio.h b/drivers/net/wireless/cw1200/hwio.h new file mode 100644 index 000000000000..7ee73fe32ccf --- /dev/null +++ b/drivers/net/wireless/cw1200/hwio.h @@ -0,0 +1,247 @@ +/* + * 
Low-level API for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * Based on: + * ST-Ericsson UMAC CW1200 driver which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_HWIO_H_INCLUDED +#define CW1200_HWIO_H_INCLUDED + +/* extern */ struct cw1200_common; + +#define CW1200_CUT_11_ID_STR (0x302E3830) +#define CW1200_CUT_22_ID_STR1 (0x302e3132) +#define CW1200_CUT_22_ID_STR2 (0x32302e30) +#define CW1200_CUT_22_ID_STR3 (0x3335) +#define CW1200_CUT_ID_ADDR (0xFFF17F90) +#define CW1200_CUT2_ID_ADDR (0xFFF1FF90) + +/* Download control area */ +/* boot loader start address in SRAM */ +#define DOWNLOAD_BOOT_LOADER_OFFSET (0x00000000) +/* 32K, 0x4000 to 0xDFFF */ +#define DOWNLOAD_FIFO_OFFSET (0x00004000) +/* 32K */ +#define DOWNLOAD_FIFO_SIZE (0x00008000) +/* 128 bytes, 0xFF80 to 0xFFFF */ +#define DOWNLOAD_CTRL_OFFSET (0x0000FF80) +#define DOWNLOAD_CTRL_DATA_DWORDS (32-6) + +struct download_cntl_t { + /* size of whole firmware file (including Checksum), host init */ + u32 image_size; + /* downloading flags */ + u32 flags; + /* No. of bytes put into the download, init & updated by host */ + u32 put; + /* last traced program counter, last ARM reg_pc */ + u32 trace_pc; + /* No. of bytes read from the download, host init, device updates */ + u32 get; + /* r0, boot loader status, host init to pending, device updates */ + u32 status; + /* Extra debug info, r1 to r14 if status=r0=DOWNLOAD_EXCEPTION */ + u32 debug_data[DOWNLOAD_CTRL_DATA_DWORDS]; +}; + +#define DOWNLOAD_IMAGE_SIZE_REG \ + (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, image_size)) +#define DOWNLOAD_FLAGS_REG \ + (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, flags)) +#define DOWNLOAD_PUT_REG \ + (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, put)) +#define DOWNLOAD_TRACE_PC_REG \ + (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, trace_pc)) +#define DOWNLOAD_GET_REG \ + (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, get)) +#define DOWNLOAD_STATUS_REG \ + (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, status)) +#define DOWNLOAD_DEBUG_DATA_REG \ + (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, debug_data)) +#define DOWNLOAD_DEBUG_DATA_LEN (108) + +#define DOWNLOAD_BLOCK_SIZE (1024) + +/* For boot loader detection */ +#define DOWNLOAD_ARE_YOU_HERE (0x87654321) +#define DOWNLOAD_I_AM_HERE (0x12345678) + +/* Download error code */ +#define DOWNLOAD_PENDING (0xFFFFFFFF) +#define DOWNLOAD_SUCCESS (0) +#define DOWNLOAD_EXCEPTION (1) +#define DOWNLOAD_ERR_MEM_1 (2) +#define DOWNLOAD_ERR_MEM_2 (3) +#define DOWNLOAD_ERR_SOFTWARE (4) +#define DOWNLOAD_ERR_FILE_SIZE (5) +#define DOWNLOAD_ERR_CHECKSUM (6) +#define DOWNLOAD_ERR_OVERFLOW (7) +#define DOWNLOAD_ERR_IMAGE (8) +#define DOWNLOAD_ERR_HOST (9) +#define DOWNLOAD_ERR_ABORT (10) + + +#define SYS_BASE_ADDR_SILICON (0) +#define PAC_BASE_ADDRESS_SILICON (SYS_BASE_ADDR_SILICON + 0x09000000) +#define PAC_SHARED_MEMORY_SILICON (PAC_BASE_ADDRESS_SILICON) + +#define CW1200_APB(addr) (PAC_SHARED_MEMORY_SILICON + (addr)) + +/* *************************************************************** +*Device register definitions +*************************************************************** */ +/* WBF - SPI Register Addresses */ +#define ST90TDS_ADDR_ID_BASE 
(0x0000) +/* 16/32 bits */ +#define ST90TDS_CONFIG_REG_ID (0x0000) +/* 16/32 bits */ +#define ST90TDS_CONTROL_REG_ID (0x0001) +/* 16 bits, Q mode W/R */ +#define ST90TDS_IN_OUT_QUEUE_REG_ID (0x0002) +/* 32 bits, AHB bus R/W */ +#define ST90TDS_AHB_DPORT_REG_ID (0x0003) +/* 16/32 bits */ +#define ST90TDS_SRAM_BASE_ADDR_REG_ID (0x0004) +/* 32 bits, APB bus R/W */ +#define ST90TDS_SRAM_DPORT_REG_ID (0x0005) +/* 32 bits, t_settle/general */ +#define ST90TDS_TSET_GEN_R_W_REG_ID (0x0006) +/* 16 bits, Q mode read, no length */ +#define ST90TDS_FRAME_OUT_REG_ID (0x0007) +#define ST90TDS_ADDR_ID_MAX (ST90TDS_FRAME_OUT_REG_ID) + +/* WBF - Control register bit set */ +/* next o/p length, bit 11 to 0 */ +#define ST90TDS_CONT_NEXT_LEN_MASK (0x0FFF) +#define ST90TDS_CONT_WUP_BIT (BIT(12)) +#define ST90TDS_CONT_RDY_BIT (BIT(13)) +#define ST90TDS_CONT_IRQ_ENABLE (BIT(14)) +#define ST90TDS_CONT_RDY_ENABLE (BIT(15)) +#define ST90TDS_CONT_IRQ_RDY_ENABLE (BIT(14)|BIT(15)) + +/* SPI Config register bit set */ +#define ST90TDS_CONFIG_FRAME_BIT (BIT(2)) +#define ST90TDS_CONFIG_WORD_MODE_BITS (BIT(3)|BIT(4)) +#define ST90TDS_CONFIG_WORD_MODE_1 (BIT(3)) +#define ST90TDS_CONFIG_WORD_MODE_2 (BIT(4)) +#define ST90TDS_CONFIG_ERROR_0_BIT (BIT(5)) +#define ST90TDS_CONFIG_ERROR_1_BIT (BIT(6)) +#define ST90TDS_CONFIG_ERROR_2_BIT (BIT(7)) +/* TBD: Sure??? */ +#define ST90TDS_CONFIG_CSN_FRAME_BIT (BIT(7)) +#define ST90TDS_CONFIG_ERROR_3_BIT (BIT(8)) +#define ST90TDS_CONFIG_ERROR_4_BIT (BIT(9)) +/* QueueM */ +#define ST90TDS_CONFIG_ACCESS_MODE_BIT (BIT(10)) +/* AHB bus */ +#define ST90TDS_CONFIG_AHB_PRFETCH_BIT (BIT(11)) +#define ST90TDS_CONFIG_CPU_CLK_DIS_BIT (BIT(12)) +/* APB bus */ +#define ST90TDS_CONFIG_PRFETCH_BIT (BIT(13)) +/* cpu reset */ +#define ST90TDS_CONFIG_CPU_RESET_BIT (BIT(14)) +#define ST90TDS_CONFIG_CLEAR_INT_BIT (BIT(15)) + +/* For CW1200 the IRQ Enable and Ready Bits are in CONFIG register */ +#define ST90TDS_CONF_IRQ_ENABLE (BIT(16)) +#define ST90TDS_CONF_RDY_ENABLE (BIT(17)) +#define ST90TDS_CONF_IRQ_RDY_ENABLE (BIT(16)|BIT(17)) + +int cw1200_data_read(struct cw1200_common *priv, + void *buf, size_t buf_len); +int cw1200_data_write(struct cw1200_common *priv, + const void *buf, size_t buf_len); + +int cw1200_reg_read(struct cw1200_common *priv, u16 addr, + void *buf, size_t buf_len); +int cw1200_reg_write(struct cw1200_common *priv, u16 addr, + const void *buf, size_t buf_len); + +static inline int cw1200_reg_read_16(struct cw1200_common *priv, + u16 addr, u16 *val) +{ + u32 tmp; + int i; + i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp)); + tmp = le32_to_cpu(tmp); + *val = tmp & 0xffff; + return i; +} + +static inline int cw1200_reg_write_16(struct cw1200_common *priv, + u16 addr, u16 val) +{ + u32 tmp = val; + tmp = cpu_to_le32(tmp); + return cw1200_reg_write(priv, addr, &tmp, sizeof(tmp)); +} + +static inline int cw1200_reg_read_32(struct cw1200_common *priv, + u16 addr, u32 *val) +{ + int i = cw1200_reg_read(priv, addr, val, sizeof(*val)); + *val = le32_to_cpu(*val); + return i; +} + +static inline int cw1200_reg_write_32(struct cw1200_common *priv, + u16 addr, u32 val) +{ + val = cpu_to_le32(val); + return cw1200_reg_write(priv, addr, &val, sizeof(val)); +} + +int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf, + size_t buf_len, u32 prefetch, u16 port_addr); +int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf, + size_t buf_len); + +static inline int cw1200_apb_read(struct cw1200_common *priv, u32 addr, + void *buf, size_t buf_len) +{ + return 
cw1200_indirect_read(priv, addr, buf, buf_len, + ST90TDS_CONFIG_PRFETCH_BIT, + ST90TDS_SRAM_DPORT_REG_ID); +} + +static inline int cw1200_ahb_read(struct cw1200_common *priv, u32 addr, + void *buf, size_t buf_len) +{ + return cw1200_indirect_read(priv, addr, buf, buf_len, + ST90TDS_CONFIG_AHB_PRFETCH_BIT, + ST90TDS_AHB_DPORT_REG_ID); +} + +static inline int cw1200_apb_read_32(struct cw1200_common *priv, + u32 addr, u32 *val) +{ + int i = cw1200_apb_read(priv, addr, val, sizeof(*val)); + *val = le32_to_cpu(*val); + return i; +} + +static inline int cw1200_apb_write_32(struct cw1200_common *priv, + u32 addr, u32 val) +{ + val = cpu_to_le32(val); + return cw1200_apb_write(priv, addr, &val, sizeof(val)); +} +static inline int cw1200_ahb_read_32(struct cw1200_common *priv, + u32 addr, u32 *val) +{ + int i = cw1200_ahb_read(priv, addr, val, sizeof(*val)); + *val = le32_to_cpu(*val); + return i; +} + +#endif /* CW1200_HWIO_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/itp.c b/drivers/net/wireless/cw1200/itp.c new file mode 100644 index 000000000000..c0730bb49b75 --- /dev/null +++ b/drivers/net/wireless/cw1200/itp.c @@ -0,0 +1,730 @@ +/* + * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers + * ITP code + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "cw1200.h" +#include "debug.h" +#include "itp.h" +#include "sta.h" + +static int __cw1200_itp_open(struct cw1200_common *priv); +static int __cw1200_itp_close(struct cw1200_common *priv); +static void cw1200_itp_rx_start(struct cw1200_common *priv); +static void cw1200_itp_rx_stop(struct cw1200_common *priv); +static void cw1200_itp_rx_stats(struct cw1200_common *priv); +static void cw1200_itp_rx_reset(struct cw1200_common *priv); +static void cw1200_itp_tx_stop(struct cw1200_common *priv); +static void cw1200_itp_handle(struct cw1200_common *priv, + struct sk_buff *skb); +static void cw1200_itp_err(struct cw1200_common *priv, + int err, + int arg); +static void __cw1200_itp_tx_stop(struct cw1200_common *priv); + +static ssize_t cw1200_itp_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + struct cw1200_common *priv = file->private_data; + struct cw1200_itp *itp = &priv->debug->itp; + struct sk_buff *skb; + int ret; + + if (skb_queue_empty(&itp->log_queue)) + return 0; + + skb = skb_dequeue(&itp->log_queue); + ret = copy_to_user(user_buf, skb->data, skb->len); + *ppos += skb->len; + skb->data[skb->len] = 0; + pr_debug("[ITP] >>> %s", skb->data); + consume_skb(skb); + + return skb->len - ret; +} + +static ssize_t cw1200_itp_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct cw1200_common *priv = file->private_data; + struct sk_buff *skb; + + if (!count || count > 1024) + return -EINVAL; + skb = dev_alloc_skb(count + 1); + if (!skb) + return -ENOMEM; + skb_trim(skb, 0); + skb_put(skb, count + 1); + if (copy_from_user(skb->data, user_buf, count)) { + kfree_skb(skb); + return -EFAULT; + } + skb->data[count] = 0; + + cw1200_itp_handle(priv, skb); + consume_skb(skb); + return count; +} + +static unsigned int cw1200_itp_poll(struct file *file, poll_table *wait) +{ + struct cw1200_common *priv = file->private_data; + struct cw1200_itp *itp = &priv->debug->itp; + unsigned 
int mask = 0; + + poll_wait(file, &itp->read_wait, wait); + + if (!skb_queue_empty(&itp->log_queue)) + mask |= POLLIN | POLLRDNORM; + + mask |= POLLOUT | POLLWRNORM; + + return mask; +} + +static int cw1200_itp_open(struct inode *inode, struct file *file) +{ + struct cw1200_common *priv = inode->i_private; + struct cw1200_itp *itp = &priv->debug->itp; + int ret = 0; + + file->private_data = priv; + if (atomic_inc_return(&itp->open_count) == 1) { + ret = __cw1200_itp_open(priv); + if (ret && !atomic_dec_return(&itp->open_count)) + __cw1200_itp_close(priv); + } else { + atomic_dec(&itp->open_count); + ret = -EBUSY; + } + + return ret; +} + +static int cw1200_itp_close(struct inode *inode, struct file *file) +{ + struct cw1200_common *priv = file->private_data; + struct cw1200_itp *itp = &priv->debug->itp; + if (!atomic_dec_return(&itp->open_count)) { + __cw1200_itp_close(priv); + wake_up(&itp->close_wait); + } + return 0; +} + +static const struct file_operations fops_itp = { + .open = cw1200_itp_open, + .read = cw1200_itp_read, + .write = cw1200_itp_write, + .poll = cw1200_itp_poll, + .release = cw1200_itp_close, + .llseek = default_llseek, + .owner = THIS_MODULE, +}; + +static void cw1200_itp_fill_pattern(u8 *data, int size, + enum cw1200_itp_data_modes mode) +{ + if (size <= 0) + return; + + switch (mode) { + default: + case ITP_DATA_ZEROS: + memset(data, 0x0, size); + break; + case ITP_DATA_ONES: + memset(data, 0xff, size); + break; + case ITP_DATA_ZERONES: + memset(data, 0x55, size); + break; + case ITP_DATA_RANDOM: + get_random_bytes(data, size); + break; + } + return; +} + +static void cw1200_itp_tx_work(struct work_struct *work) +{ + struct cw1200_itp *itp = container_of(work, struct cw1200_itp, + tx_work.work); + struct cw1200_common *priv = itp->priv; + atomic_set(&priv->bh_tx, 1); + wake_up(&priv->bh_wq); +} + +static void cw1200_itp_tx_finish(struct work_struct *work) +{ + struct cw1200_itp *itp = container_of(work, struct cw1200_itp, + tx_finish.work); + __cw1200_itp_tx_stop(itp->priv); +} + +int cw1200_itp_init(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + + itp->priv = priv; + atomic_set(&itp->open_count, 0); + atomic_set(&itp->stop_tx, 0); + atomic_set(&itp->awaiting_confirm, 0); + skb_queue_head_init(&itp->log_queue); + spin_lock_init(&itp->tx_lock); + init_waitqueue_head(&itp->read_wait); + init_waitqueue_head(&itp->write_wait); + init_waitqueue_head(&itp->close_wait); + INIT_DELAYED_WORK(&itp->tx_work, cw1200_itp_tx_work); + INIT_DELAYED_WORK(&itp->tx_finish, cw1200_itp_tx_finish); + itp->data = NULL; + itp->hdr_len = WSM_TX_EXTRA_HEADROOM + + sizeof(struct ieee80211_hdr_3addr); + + if (!debugfs_create_file("itp", S_IRUSR | S_IWUSR, + priv->debug->debugfs_phy, priv, &fops_itp)) + return -ENOMEM; + + return 0; +} + +void cw1200_itp_release(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + + wait_event_interruptible(itp->close_wait, + !atomic_read(&itp->open_count)); + + WARN_ON(atomic_read(&itp->open_count)); + + skb_queue_purge(&itp->log_queue); + cw1200_itp_tx_stop(priv); +} + +static int __cw1200_itp_open(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + + if (!priv->vif) + return -EINVAL; + if (priv->join_status) + return -EINVAL; + itp->saved_channel = priv->channel; + if (!priv->channel) + priv->channel = &priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; + wsm_set_bssid_filtering(priv, false); + cw1200_itp_rx_reset(priv); + return 0; +} + +static int 
__cw1200_itp_close(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + if (atomic_read(&itp->test_mode) == TEST_MODE_RX_TEST) + cw1200_itp_rx_stop(priv); + cw1200_itp_tx_stop(priv); + cw1200_disable_listening(priv); + cw1200_update_filtering(priv); + priv->channel = itp->saved_channel; + return 0; +} + +bool cw1200_is_itp(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + return atomic_read(&itp->open_count) != 0; +} + +static void cw1200_itp_rx_reset(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + itp->rx_cnt = 0; + itp->rx_rssi = 0; + itp->rx_rssi_max = -1000; + itp->rx_rssi_min = 1000; +} + +static void cw1200_itp_rx_start(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + + pr_debug("[ITP] RX start, band = %d, ch = %d\n", + itp->band, itp->ch); + atomic_set(&itp->test_mode, TEST_MODE_RX_TEST); + cw1200_update_listening(priv, false); + priv->channel = &priv->hw-> + wiphy->bands[itp->band]->channels[itp->ch]; + cw1200_update_listening(priv, true); + wsm_set_bssid_filtering(priv, false); +} + +static void cw1200_itp_rx_stop(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + pr_debug("[ITP] RX stop\n"); + atomic_set(&itp->test_mode, TEST_MODE_NO_TEST); + cw1200_itp_rx_reset(priv); +} + +static void cw1200_itp_rx_stats(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + struct sk_buff *skb; + char buf[128]; + int len, ret; + struct wsm_mib_counters_table counters; + + ret = wsm_get_counters_table(priv, &counters); + + if (ret) + cw1200_itp_err(priv, -EBUSY, 20); + + if (!itp->rx_cnt) + len = snprintf(buf, sizeof(buf), "1,0,0,0,0,%d\n", + counters.rx_packet_errors); + else + len = snprintf(buf, sizeof(buf), "1,%d,%ld,%d,%d,%d\n", + itp->rx_cnt, + itp->rx_cnt ? 
itp->rx_rssi / itp->rx_cnt : 0, + itp->rx_rssi_min, itp->rx_rssi_max, + counters.rx_packet_errors); + + if (len <= 0) { + cw1200_itp_err(priv, -EBUSY, 21); + return; + } + + skb = dev_alloc_skb(len); + if (!skb) { + cw1200_itp_err(priv, -ENOMEM, 22); + return; + } + + itp->rx_cnt = 0; + itp->rx_rssi = 0; + itp->rx_rssi_max = -1000; + itp->rx_rssi_min = 1000; + + skb_trim(skb, 0); + skb_put(skb, len); + + memcpy(skb->data, buf, len); + skb_queue_tail(&itp->log_queue, skb); + wake_up(&itp->read_wait); +} + +static void cw1200_itp_tx_start(struct cw1200_common *priv) +{ + struct wsm_tx *tx; + struct ieee80211_hdr_3addr *hdr; + struct cw1200_itp *itp = &priv->debug->itp; + struct wsm_mib_association_mode assoc_mode = { + .flags = WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE, + .preamble = itp->preamble, + }; + int len; + u8 da_addr[6] = ITP_DEFAULT_DA_ADDR; + + /* Rates index 4 and 5 are not supported */ + if (itp->rate > 3) + itp->rate += 2; + + pr_debug("[ITP] TX start: band = %d, ch = %d, rate = %d, preamble = %d, number = %d, data_mode = %d, interval = %d, power = %d, data_len = %d\n", + itp->band, itp->ch, itp->rate, itp->preamble, + itp->number, itp->data_mode, itp->interval_us, + itp->power, itp->data_len); + + len = itp->hdr_len + itp->data_len; + + itp->data = kmalloc(len, GFP_KERNEL); + tx = (struct wsm_tx *)itp->data; + tx->hdr.len = itp->data_len + itp->hdr_len; + tx->hdr.id = __cpu_to_le16(0x0004 | 1 << 6); + tx->max_tx_rate = itp->rate; + tx->queue_id = 3; + tx->more = 0; + tx->flags = 0xc; + tx->packet_id = 0x55ff55; + tx->reserved = 0; + tx->expire_time = 1; + + if (itp->preamble == ITP_PREAMBLE_GREENFIELD) + tx->ht_tx_parameters = WSM_HT_TX_GREENFIELD; + else if (itp->preamble == ITP_PREAMBLE_MIXED) + tx->ht_tx_parameters = WSM_HT_TX_MIXED; + + hdr = (struct ieee80211_hdr_3addr *)&itp->data[sizeof(struct wsm_tx)]; + memset(hdr, 0, sizeof(*hdr)); + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); + memcpy(hdr->addr1, da_addr, ETH_ALEN); + memcpy(hdr->addr2, priv->vif->addr, ETH_ALEN); + memcpy(hdr->addr3, da_addr, ETH_ALEN); + + cw1200_itp_fill_pattern(&itp->data[itp->hdr_len], + itp->data_len, itp->data_mode); + + cw1200_update_listening(priv, false); + priv->channel = &priv->hw->wiphy->bands[itp->band]->channels[itp->ch]; + WARN_ON(wsm_set_output_power(priv, itp->power)); + if (itp->preamble == ITP_PREAMBLE_SHORT || + itp->preamble == ITP_PREAMBLE_LONG) + WARN_ON(wsm_set_association_mode(priv, + &assoc_mode)); + wsm_set_bssid_filtering(priv, false); + cw1200_update_listening(priv, true); + + spin_lock_bh(&itp->tx_lock); + atomic_set(&itp->test_mode, TEST_MODE_TX_TEST); + atomic_set(&itp->awaiting_confirm, 0); + atomic_set(&itp->stop_tx, 0); + atomic_set(&priv->bh_tx, 1); + ktime_get_ts(&itp->last_sent); + wake_up(&priv->bh_wq); + spin_unlock_bh(&itp->tx_lock); +} + +void __cw1200_itp_tx_stop(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + spin_lock_bh(&itp->tx_lock); + kfree(itp->data); + itp->data = NULL; + atomic_set(&itp->test_mode, TEST_MODE_NO_TEST); + spin_unlock_bh(&itp->tx_lock); +} + +static void cw1200_itp_tx_stop(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + pr_debug("[ITP] TX stop\n"); + atomic_set(&itp->stop_tx, 1); + flush_workqueue(priv->workqueue); + + /* time for FW to confirm all tx requests */ + msleep(500); + + __cw1200_itp_tx_stop(priv); +} + +static int cw1200_print_fw_version(struct cw1200_common *priv, + u8 *buf, size_t len) +{ + return snprintf(buf, len, "%s %d.%d", 
+ cw1200_fw_types[priv->wsm_caps.fw_type], + priv->wsm_caps.fw_ver, + priv->wsm_caps.fw_build); +} + +static void cw1200_itp_get_version(struct cw1200_common *priv, + enum cw1200_itp_version_type type) +{ + struct cw1200_itp *itp = &priv->debug->itp; + struct sk_buff *skb; + char buf[ITP_BUF_SIZE]; + size_t size = 0; + int len; + pr_debug("[ITP] print %s version\n", + type == ITP_CHIP_ID ? "chip" : "firmware"); + + len = snprintf(buf, ITP_BUF_SIZE, "2,"); + if (len <= 0) { + cw1200_itp_err(priv, -EINVAL, 40); + return; + } + size += len; + + switch (type) { + case ITP_CHIP_ID: + len = cw1200_print_fw_version(priv, buf+size, + ITP_BUF_SIZE - size); + + if (len <= 0) { + cw1200_itp_err(priv, -EINVAL, 41); + return; + } + size += len; + break; + case ITP_FW_VER: + len = snprintf(buf+size, ITP_BUF_SIZE - size, + "%d.%d", priv->wsm_caps.hw_id, + priv->wsm_caps.hw_subid); + if (len <= 0) { + cw1200_itp_err(priv, -EINVAL, 42); + return; + } + size += len; + break; + default: + cw1200_itp_err(priv, -EINVAL, 43); + break; + } + + len = snprintf(buf+size, ITP_BUF_SIZE-size, "\n"); + if (len <= 0) { + cw1200_itp_err(priv, -EINVAL, 44); + return; + } + size += len; + + skb = dev_alloc_skb(size); + if (!skb) { + cw1200_itp_err(priv, -ENOMEM, 45); + return; + } + + skb_trim(skb, 0); + skb_put(skb, size); + + memcpy(skb->data, buf, size); + skb_queue_tail(&itp->log_queue, skb); + wake_up(&itp->read_wait); +} + +int cw1200_itp_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst) +{ + struct cw1200_itp *itp; + struct timespec now; + int time_left_us; + + if (!priv->debug) + return 0; + + itp = &priv->debug->itp; + + if (!itp) + return 0; + + spin_lock_bh(&itp->tx_lock); + if (atomic_read(&itp->test_mode) != TEST_MODE_TX_TEST) + goto out; + + if (atomic_read(&itp->stop_tx)) + goto out; + + if (itp->number == 0) { + atomic_set(&itp->stop_tx, 1); + queue_delayed_work(priv->workqueue, &itp->tx_finish, HZ/10); + goto out; + } + + if (!itp->data) + goto out; + + if (priv->hw_bufs_used >= 2) { + if (!atomic_read(&priv->bh_rx)) + atomic_set(&priv->bh_rx, 1); + atomic_set(&priv->bh_tx, 1); + goto out; + } + + ktime_get_ts(&now); + time_left_us = (itp->last_sent.tv_sec - now.tv_sec)*1000000 + + (itp->last_sent.tv_nsec - now.tv_nsec)/1000 + + itp->interval_us; + + if (time_left_us > ITP_TIME_THRES_US) { + queue_delayed_work(priv->workqueue, &itp->tx_work, + ITP_US_TO_MS(time_left_us)*HZ/1000); + goto out; + } + + if (time_left_us > 50) + udelay(time_left_us); + + if (itp->number > 0) + itp->number--; + + *data = itp->data; + *tx_len = itp->data_len + itp->hdr_len; + + if (itp->data_mode == ITP_DATA_RANDOM) + cw1200_itp_fill_pattern(&itp->data[itp->hdr_len], + itp->data_len, itp->data_mode); + *burst = 2; + atomic_set(&priv->bh_tx, 1); + ktime_get_ts(&itp->last_sent); + atomic_add(1, &itp->awaiting_confirm); + spin_unlock_bh(&itp->tx_lock); + return 1; + +out: + spin_unlock_bh(&itp->tx_lock); + return 0; +} + +bool cw1200_itp_rxed(struct cw1200_common *priv, struct sk_buff *skb) +{ + struct cw1200_itp *itp = &priv->debug->itp; + struct ieee80211_rx_status *rx = IEEE80211_SKB_RXCB(skb); + int signal; + + if (atomic_read(&itp->test_mode) != TEST_MODE_RX_TEST) + return cw1200_is_itp(priv); + if (rx->freq != priv->channel->center_freq) + return true; + + signal = rx->signal; + itp->rx_cnt++; + itp->rx_rssi += signal; + if (itp->rx_rssi_min > rx->signal) + itp->rx_rssi_min = rx->signal; + if (itp->rx_rssi_max < rx->signal) + itp->rx_rssi_max = rx->signal; + + return true; +} + +void 
cw1200_itp_wake_up_tx(struct cw1200_common *priv) +{ + wake_up(&priv->debug->itp.write_wait); +} + +bool cw1200_itp_tx_running(struct cw1200_common *priv) +{ + if (atomic_read(&priv->debug->itp.awaiting_confirm) || + atomic_read(&priv->debug->itp.test_mode) == + TEST_MODE_TX_TEST) { + atomic_sub(1, &priv->debug->itp.awaiting_confirm); + return true; + } + return false; +} + +static void cw1200_itp_handle(struct cw1200_common *priv, + struct sk_buff *skb) +{ + struct cw1200_itp *itp = &priv->debug->itp; + const struct wiphy *wiphy = priv->hw->wiphy; + int cmd; + int ret; + + pr_debug("[ITP] <<< %s", skb->data); + if (sscanf(skb->data, "%d", &cmd) != 1) { + cw1200_itp_err(priv, -EINVAL, 1); + return; + } + + switch (cmd) { + case 1: /* RX test */ + if (atomic_read(&itp->test_mode)) { + cw1200_itp_err(priv, -EBUSY, 0); + return; + } + ret = sscanf(skb->data, "%d,%d,%d", + &cmd, &itp->band, &itp->ch); + if (ret != 3) { + cw1200_itp_err(priv, -EINVAL, ret + 1); + return; + } + if (itp->band >= 2) { + cw1200_itp_err(priv, -EINVAL, 2); + } else if (!wiphy->bands[itp->band]) { + cw1200_itp_err(priv, -EINVAL, 2); + } else if (itp->ch >= wiphy->bands[itp->band]->n_channels) { + cw1200_itp_err(priv, -EINVAL, 3); + } else { + cw1200_itp_rx_stats(priv); + cw1200_itp_rx_start(priv); + } + break; + case 2: /* RX stat */ + cw1200_itp_rx_stats(priv); + break; + case 3: /* RX/TX stop */ + if (atomic_read(&itp->test_mode) == TEST_MODE_RX_TEST) { + cw1200_itp_rx_stats(priv); + cw1200_itp_rx_stop(priv); + } else if (atomic_read(&itp->test_mode) == TEST_MODE_TX_TEST) { + cw1200_itp_tx_stop(priv); + } else { + cw1200_itp_err(priv, -EBUSY, 0); + } + break; + case 4: /* TX start */ + if (atomic_read(&itp->test_mode) != TEST_MODE_NO_TEST) { + cw1200_itp_err(priv, -EBUSY, 0); + return; + } + ret = sscanf(skb->data, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d", + &cmd, &itp->band, &itp->ch, &itp->rate, + &itp->preamble, &itp->number, &itp->data_mode, + &itp->interval_us, &itp->power, &itp->data_len); + if (ret != 10) { + cw1200_itp_err(priv, -EINVAL, ret + 1); + return; + } + if (itp->band >= 2) { + cw1200_itp_err(priv, -EINVAL, 2); + } else if (!wiphy->bands[itp->band]) { + cw1200_itp_err(priv, -EINVAL, 2); + } else if (itp->ch >= wiphy->bands[itp->band]->n_channels) { + cw1200_itp_err(priv, -EINVAL, 3); + } else if (itp->rate >= 20) { + cw1200_itp_err(priv, -EINVAL, 4); + } else if (itp->preamble >= ITP_PREAMBLE_MAX) { + cw1200_itp_err(priv, -EINVAL, 5); + } else if (itp->data_mode >= ITP_DATA_MAX_MODE) { + cw1200_itp_err(priv, -EINVAL, 7); + } else if (itp->data_len < ITP_MIN_DATA_SIZE || + itp->data_len > (priv->wsm_caps.input_buffer_size - itp->hdr_len)) { + cw1200_itp_err(priv, -EINVAL, 8); + } else { + cw1200_itp_tx_start(priv); + } + break; + case 5: + cw1200_itp_get_version(priv, ITP_CHIP_ID); + break; + case 6: + cw1200_itp_get_version(priv, ITP_FW_VER); + break; + } +} + +static void cw1200_itp_err(struct cw1200_common *priv, + int err, int arg) +{ + struct cw1200_itp *itp = &priv->debug->itp; + struct sk_buff *skb; + static char buf[255]; + int len; + + len = snprintf(buf, sizeof(buf), "%d,%d\n", + err, arg); + if (len <= 0) + return; + + skb = dev_alloc_skb(len); + if (!skb) + return; + + skb_trim(skb, 0); + skb_put(skb, len); + + memcpy(skb->data, buf, len); + skb_queue_tail(&itp->log_queue, skb); + wake_up(&itp->read_wait); + + len = sprint_symbol(buf, + (unsigned long)__builtin_return_address(0)); + if (len <= 0) + return; + pr_debug("[ITP] error %d,%d from %s\n", + err, arg, buf); +} diff --git 
a/drivers/net/wireless/cw1200/itp.h b/drivers/net/wireless/cw1200/itp.h new file mode 100644 index 000000000000..1e9dfb7fcadc --- /dev/null +++ b/drivers/net/wireless/cw1200/itp.h @@ -0,0 +1,144 @@ +/* + * ITP code for ST-Ericsson CW1200 mac80211 driver + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_ITP_H_INCLUDED +#define CW1200_ITP_H_INCLUDED + +struct cw200_common; +struct wsm_tx_confirm; +struct dentry; + +#ifdef CONFIG_CW1200_ITP + +/*extern*/ struct ieee80211_channel; + +#define TEST_MODE_NO_TEST (0) +#define TEST_MODE_RX_TEST (1) +#define TEST_MODE_TX_TEST (2) +#define ITP_DEFAULT_DA_ADDR {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +#define ITP_MIN_DATA_SIZE 6 +#define ITP_MAX_DATA_SIZE 1600 +#define ITP_TIME_THRES_US 10000 +#define ITP_US_TO_MS(x) ((x)/1000) +#define ITP_MS_TO_US(x) ((x)*1000) +#define ITP_BUF_SIZE 255 + + +enum cw1200_itp_data_modes { + ITP_DATA_ZEROS, + ITP_DATA_ONES, + ITP_DATA_ZERONES, + ITP_DATA_RANDOM, + ITP_DATA_MAX_MODE, +}; + +enum cw1200_itp_version_type { + ITP_CHIP_ID, + ITP_FW_VER, +}; + +enum cw1200_itp_preamble_type { + ITP_PREAMBLE_LONG, + ITP_PREAMBLE_SHORT, + ITP_PREAMBLE_OFDM, + ITP_PREAMBLE_MIXED, + ITP_PREAMBLE_GREENFIELD, + ITP_PREAMBLE_MAX, +}; + + +struct cw1200_itp { + struct cw1200_common *priv; + atomic_t open_count; + atomic_t awaiting_confirm; + struct sk_buff_head log_queue; + wait_queue_head_t read_wait; + wait_queue_head_t write_wait; + wait_queue_head_t close_wait; + struct ieee80211_channel *saved_channel; + atomic_t stop_tx; + struct delayed_work tx_work; + struct delayed_work tx_finish; + spinlock_t tx_lock; + struct timespec last_sent; + atomic_t test_mode; + int rx_cnt; + long rx_rssi; + int rx_rssi_max; + int rx_rssi_min; + unsigned band; + unsigned ch; + unsigned rate; + unsigned preamble; + unsigned int number; + unsigned data_mode; + int interval_us; + int power; + u8 *data; + int hdr_len; + int data_len; +}; + +int cw1200_itp_init(struct cw1200_common *priv); +void cw1200_itp_release(struct cw1200_common *priv); + +bool cw1200_is_itp(struct cw1200_common *priv); +bool cw1200_itp_rxed(struct cw1200_common *priv, struct sk_buff *skb); +void cw1200_itp_wake_up_tx(struct cw1200_common *priv); +int cw1200_itp_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst); +bool cw1200_itp_tx_running(struct cw1200_common *priv); + +#else /* CONFIG_CW1200_ITP */ + +static inline int cw1200_itp_init(struct cw1200_common *priv) +{ + return 0; +} + +static inline void cw1200_itp_release(struct cw1200_common *priv) +{ +} + +static inline bool cw1200_is_itp(struct cw1200_common *priv) +{ + return false; +} + +static inline bool cw1200_itp_rxed(struct cw1200_common *priv, + struct sk_buff *skb) +{ + return false; +} + + +static inline void cw1200_itp_consume_txed(struct cw1200_common *priv) +{ +} + +static inline void cw1200_itp_wake_up_tx(struct cw1200_common *priv) +{ +} + +static inline int cw1200_itp_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst) +{ + return 0; +} + +static inline bool cw1200_itp_tx_running(struct cw1200_common *priv) +{ + return false; +} + +#endif /* CONFIG_CW1200_ITP */ + +#endif /* CW1200_ITP_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c new file mode 100644 index 000000000000..8426d3d7607e 
--- /dev/null +++ b/drivers/net/wireless/cw1200/main.c @@ -0,0 +1,618 @@ +/* + * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * Based on: + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007-2009, Christian Lamparter + * Copyright 2008, Johannes Berg + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * - stlc45xx driver + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cw1200.h" +#include "txrx.h" +#include "sbus.h" +#include "fwio.h" +#include "hwio.h" +#include "bh.h" +#include "sta.h" +#include "scan.h" +#include "debug.h" +#include "pm.h" + +MODULE_AUTHOR("Dmitry Tarnyagin "); +MODULE_DESCRIPTION("Softmac ST-Ericsson CW1200 common code"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("cw1200_core"); + +/* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */ +static u8 cw1200_mac_template[ETH_ALEN] = {0x02, 0x80, 0xe1, 0x00, 0x00, 0x00}; +module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(macaddr, "Override platform_data MAC address"); + +static char *cw1200_sdd_path; +module_param(cw1200_sdd_path, charp, 0644); +MODULE_PARM_DESC(cw1200_sdd_path, "Override platform_data SDD file"); +static int cw1200_refclk; +module_param(cw1200_refclk, int, 0644); +MODULE_PARM_DESC(cw1200_refclk, "Override platform_data reference clock"); + +int cw1200_power_mode = wsm_power_mode_quiescent; +module_param(cw1200_power_mode, int, 0644); +MODULE_PARM_DESC(cw1200_power_mode, "WSM power mode. 
0 == active, 1 == doze, 2 == quiescent (default)"); + +#ifdef CONFIG_CW1200_ETF +int etf_mode; +module_param(etf_mode, int, 0644); +MODULE_PARM_DESC(etf_mode, "Enable EngineeringTestingFramework operation"); +#endif + +#define RATETAB_ENT(_rate, _rateid, _flags) \ + { \ + .bitrate = (_rate), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +static struct ieee80211_rate cw1200_rates[] = { + RATETAB_ENT(10, 0, 0), + RATETAB_ENT(20, 1, 0), + RATETAB_ENT(55, 2, 0), + RATETAB_ENT(110, 3, 0), + RATETAB_ENT(60, 6, 0), + RATETAB_ENT(90, 7, 0), + RATETAB_ENT(120, 8, 0), + RATETAB_ENT(180, 9, 0), + RATETAB_ENT(240, 10, 0), + RATETAB_ENT(360, 11, 0), + RATETAB_ENT(480, 12, 0), + RATETAB_ENT(540, 13, 0), +}; + +static struct ieee80211_rate cw1200_mcs_rates[] = { + RATETAB_ENT(65, 14, IEEE80211_TX_RC_MCS), + RATETAB_ENT(130, 15, IEEE80211_TX_RC_MCS), + RATETAB_ENT(195, 16, IEEE80211_TX_RC_MCS), + RATETAB_ENT(260, 17, IEEE80211_TX_RC_MCS), + RATETAB_ENT(390, 18, IEEE80211_TX_RC_MCS), + RATETAB_ENT(520, 19, IEEE80211_TX_RC_MCS), + RATETAB_ENT(585, 20, IEEE80211_TX_RC_MCS), + RATETAB_ENT(650, 21, IEEE80211_TX_RC_MCS), +}; + +#define cw1200_a_rates (cw1200_rates + 4) +#define cw1200_a_rates_size (ARRAY_SIZE(cw1200_rates) - 4) +#define cw1200_g_rates (cw1200_rates + 0) +#define cw1200_g_rates_size (ARRAY_SIZE(cw1200_rates)) +#define cw1200_n_rates (cw1200_mcs_rates) +#define cw1200_n_rates_size (ARRAY_SIZE(cw1200_mcs_rates)) + + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _flags) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = 5000 + (5 * (_channel)), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static struct ieee80211_channel cw1200_2ghz_chantable[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static struct ieee80211_channel cw1200_5ghz_chantable[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(149, 0), + CHAN5G(153, 0), CHAN5G(157, 0), + CHAN5G(161, 0), CHAN5G(165, 0), + CHAN5G(184, 0), CHAN5G(188, 0), + CHAN5G(192, 0), CHAN5G(196, 0), + CHAN5G(200, 0), CHAN5G(204, 0), + CHAN5G(208, 0), CHAN5G(212, 0), + CHAN5G(216, 0), +}; + +static struct ieee80211_supported_band cw1200_band_2ghz = { + .channels = cw1200_2ghz_chantable, + .n_channels = ARRAY_SIZE(cw1200_2ghz_chantable), + .bitrates = cw1200_g_rates, + .n_bitrates = cw1200_g_rates_size, + .ht_cap = { + .cap = IEEE80211_HT_CAP_GRN_FLD | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | + IEEE80211_HT_CAP_MAX_AMSDU, + .ht_supported = 1, + .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE, + .mcs = { + .rx_mask[0] = 0xFF, + .rx_highest = __cpu_to_le16(0x41), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + }, + }, +}; + +static struct 
ieee80211_supported_band cw1200_band_5ghz = { + .channels = cw1200_5ghz_chantable, + .n_channels = ARRAY_SIZE(cw1200_5ghz_chantable), + .bitrates = cw1200_a_rates, + .n_bitrates = cw1200_a_rates_size, + .ht_cap = { + .cap = IEEE80211_HT_CAP_GRN_FLD | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | + IEEE80211_HT_CAP_MAX_AMSDU, + .ht_supported = 1, + .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE, + .mcs = { + .rx_mask[0] = 0xFF, + .rx_highest = __cpu_to_le16(0x41), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + }, + }, +}; + +static const unsigned long cw1200_ttl[] = { + 1 * HZ, /* VO */ + 2 * HZ, /* VI */ + 5 * HZ, /* BE */ + 10 * HZ /* BK */ +}; + +static const struct ieee80211_ops cw1200_ops = { + .start = cw1200_start, + .stop = cw1200_stop, + .add_interface = cw1200_add_interface, + .remove_interface = cw1200_remove_interface, + .change_interface = cw1200_change_interface, + .tx = cw1200_tx, + .hw_scan = cw1200_hw_scan, + .set_tim = cw1200_set_tim, + .sta_notify = cw1200_sta_notify, + .sta_add = cw1200_sta_add, + .sta_remove = cw1200_sta_remove, + .set_key = cw1200_set_key, + .set_rts_threshold = cw1200_set_rts_threshold, + .config = cw1200_config, + .bss_info_changed = cw1200_bss_info_changed, + .prepare_multicast = cw1200_prepare_multicast, + .configure_filter = cw1200_configure_filter, + .conf_tx = cw1200_conf_tx, + .get_stats = cw1200_get_stats, + .ampdu_action = cw1200_ampdu_action, + .flush = cw1200_flush, + .suspend = cw1200_wow_suspend, + .resume = cw1200_wow_resume, + /* Intentionally not offloaded: */ + /*.channel_switch = cw1200_channel_switch, */ + /*.remain_on_channel = cw1200_remain_on_channel, */ + /*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel, */ +}; + +int cw1200_ba_rx_tids = -1; +int cw1200_ba_tx_tids = -1; +module_param(cw1200_ba_rx_tids, int, 0644); +module_param(cw1200_ba_tx_tids, int, 0644); +MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs"); +MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs"); + +static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, + const bool have_5ghz) +{ + int i, band; + struct ieee80211_hw *hw; + struct cw1200_common *priv; + + hw = ieee80211_alloc_hw(sizeof(struct cw1200_common), &cw1200_ops); + if (!hw) + return NULL; + + priv = hw->priv; + priv->hw = hw; + priv->hw_type = -1; + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + priv->rates = cw1200_rates; /* TODO: fetch from FW */ + priv->mcs_rates = cw1200_n_rates; + if (cw1200_ba_rx_tids != -1) + priv->ba_rx_tid_mask = cw1200_ba_rx_tids; + else + priv->ba_rx_tid_mask = 0xFF; /* Enable RX BLKACK for all TIDs */ + if (cw1200_ba_tx_tids != -1) + priv->ba_tx_tid_mask = cw1200_ba_tx_tids; + else + priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */ + + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS | + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_SUPPORTS_UAPSD | + IEEE80211_HW_CONNECTION_MONITOR | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_TX_AMPDU_SETUP_IN_HW | + IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC; + + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); + + /* Support only for limited wowlan functionalities */ + hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY | + WIPHY_WOWLAN_DISCONNECT; + hw->wiphy->wowlan.n_patterns = 0; + + hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + + 
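+	/* Fixed CW1200 limits: four hardware AC queues, an 8-entry rate set
+	 * with up to 15 retries per rate, and TX headroom for the WSM frame
+	 * descriptor plus the TKIP IV.
+	 */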
hw->channel_change_time = 1000; /* TODO: find actual value */ + hw->queues = 4; + + priv->rts_threshold = -1; + + hw->max_rates = 8; + hw->max_rate_tries = 15; + hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM + + 8; /* TKIP IV */ + + hw->sta_data_size = sizeof(struct cw1200_sta_priv); + + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz; + if (have_5ghz) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz; + + /* Channel params have to be cleared before registering wiphy again */ + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; + if (!sband) + continue; + for (i = 0; i < sband->n_channels; i++) { + sband->channels[i].flags = 0; + sband->channels[i].max_antenna_gain = 0; + sband->channels[i].max_power = 30; + } + } + + hw->wiphy->max_scan_ssids = 2; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + + if (macaddr) + SET_IEEE80211_PERM_ADDR(hw, (u8 *)macaddr); + else + SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template); + + /* Fix up mac address if necessary */ + if (hw->wiphy->perm_addr[3] == 0 && + hw->wiphy->perm_addr[4] == 0 && + hw->wiphy->perm_addr[5] == 0) { + get_random_bytes(&hw->wiphy->perm_addr[3], 3); + } + + mutex_init(&priv->wsm_cmd_mux); + mutex_init(&priv->conf_mutex); + priv->workqueue = create_singlethread_workqueue("cw1200_wq"); + sema_init(&priv->scan.lock, 1); + INIT_WORK(&priv->scan.work, cw1200_scan_work); + INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work); + INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout); + INIT_DELAYED_WORK(&priv->clear_recent_scan_work, + cw1200_clear_recent_scan_work); + INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout); + INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work); + INIT_WORK(&priv->join_complete_work, cw1200_join_complete_work); + INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work); + INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work); + spin_lock_init(&priv->event_queue_lock); + INIT_LIST_HEAD(&priv->event_queue); + INIT_WORK(&priv->event_handler, cw1200_event_handler); + INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work); + INIT_WORK(&priv->bss_params_work, cw1200_bss_params_work); + spin_lock_init(&priv->bss_loss_lock); + spin_lock_init(&priv->ps_state_lock); + INIT_WORK(&priv->set_cts_work, cw1200_set_cts_work); + INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work); + INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work); + INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work); + INIT_WORK(&priv->link_id_work, cw1200_link_id_work); + INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work); + INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset); + INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work); + INIT_WORK(&priv->set_beacon_wakeup_period_work, + cw1200_set_beacon_wakeup_period_work); + init_timer(&priv->mcast_timeout); + priv->mcast_timeout.data = (unsigned long)priv; + priv->mcast_timeout.function = cw1200_mcast_timeout; + + if (cw1200_queue_stats_init(&priv->tx_queue_stats, + CW1200_LINK_ID_MAX, + cw1200_skb_dtor, + priv)) { + ieee80211_free_hw(hw); + return NULL; + } + + for (i = 0; i < 4; ++i) { + if (cw1200_queue_init(&priv->tx_queue[i], + &priv->tx_queue_stats, i, 16, + cw1200_ttl[i])) { + for (; i > 0; i--) + cw1200_queue_deinit(&priv->tx_queue[i - 1]); + cw1200_queue_stats_deinit(&priv->tx_queue_stats); + ieee80211_free_hw(hw); + return NULL; + } + } + + init_waitqueue_head(&priv->channel_switch_done); + 
init_waitqueue_head(&priv->wsm_cmd_wq); + init_waitqueue_head(&priv->wsm_startup_done); + init_waitqueue_head(&priv->ps_mode_switch_done); + wsm_buf_init(&priv->wsm_cmd_buf); + spin_lock_init(&priv->wsm_cmd.lock); + priv->wsm_cmd.done = 1; + tx_policy_init(priv); + + return hw; +} + +static int cw1200_register_common(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + int err; + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) + goto done; +#endif + + err = cw1200_pm_init(&priv->pm_state, priv); + if (err) { + pr_err("Cannot init PM. (%d).\n", + err); + return err; + } + + err = ieee80211_register_hw(dev); + if (err) { + pr_err("Cannot register device (%d).\n", + err); + cw1200_pm_deinit(&priv->pm_state); + return err; + } + +#ifdef CONFIG_CW1200_ETF +done: +#endif + cw1200_debug_init(priv); + + pr_info("Registered as '%s'\n", wiphy_name(dev->wiphy)); + return 0; +} + +static void cw1200_free_common(struct ieee80211_hw *dev) +{ + ieee80211_free_hw(dev); +} + +static void cw1200_unregister_common(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + int i; + +#ifdef CONFIG_CW1200_ETF + if (!etf_mode) { +#endif + ieee80211_unregister_hw(dev); +#ifdef CONFIG_CW1200_ETF + } +#endif + + del_timer_sync(&priv->mcast_timeout); + cw1200_unregister_bh(priv); + + cw1200_debug_release(priv); + + mutex_destroy(&priv->conf_mutex); + + wsm_buf_deinit(&priv->wsm_cmd_buf); + + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + + if (priv->sdd) { + release_firmware(priv->sdd); + priv->sdd = NULL; + } + + for (i = 0; i < 4; ++i) + cw1200_queue_deinit(&priv->tx_queue[i]); + + cw1200_queue_stats_deinit(&priv->tx_queue_stats); + cw1200_pm_deinit(&priv->pm_state); +} + +/* Clock is in KHz */ +u32 cw1200_dpll_from_clk(u16 clk_khz) +{ + switch (clk_khz) { + case 0x32C8: /* 13000 KHz */ + return 0x1D89D241; + case 0x3E80: /* 16000 KHz */ + return 0x000001E1; + case 0x41A0: /* 16800 KHz */ + return 0x124931C1; + case 0x4B00: /* 19200 KHz */ + return 0x00000191; + case 0x5DC0: /* 24000 KHz */ + return 0x00000141; + case 0x6590: /* 26000 KHz */ + return 0x0EC4F121; + case 0x8340: /* 33600 KHz */ + return 0x092490E1; + case 0x9600: /* 38400 KHz */ + return 0x100010C1; + case 0x9C40: /* 40000 KHz */ + return 0x000000C1; + case 0xBB80: /* 48000 KHz */ + return 0x000000A1; + case 0xCB20: /* 52000 KHz */ + return 0x07627091; + default: + pr_err("Unknown Refclk freq (0x%04x), using 2600KHz\n", + clk_khz); + return 0x0EC4F121; + } +} + +int cw1200_core_probe(const struct sbus_ops *sbus_ops, + struct sbus_priv *sbus, + struct device *pdev, + struct cw1200_common **core, + int ref_clk, const u8 *macaddr, + const char *sdd_path, bool have_5ghz) +{ + int err = -EINVAL; + struct ieee80211_hw *dev; + struct cw1200_common *priv; + struct wsm_operational_mode mode = { + .power_mode = cw1200_power_mode, + .disable_more_flag_usage = true, + }; + + dev = cw1200_init_common(macaddr, have_5ghz); + if (!dev) + goto err; + + priv = dev->priv; + priv->hw_refclk = ref_clk; + if (cw1200_refclk) + priv->hw_refclk = cw1200_refclk; + + priv->sdd_path = (char *)sdd_path; + if (cw1200_sdd_path) + priv->sdd_path = cw1200_sdd_path; + + priv->sbus_ops = sbus_ops; + priv->sbus_priv = sbus; + priv->pdev = pdev; + SET_IEEE80211_DEV(priv->hw, pdev); + + /* Pass struct cw1200_common back up */ + *core = priv; + + err = cw1200_register_bh(priv); + if (err) + goto err1; + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) + goto skip_fw; +#endif + + err = cw1200_load_firmware(priv); + if (err) + goto err2; + + if 
(wait_event_interruptible_timeout(priv->wsm_startup_done, + priv->firmware_ready, + 3*HZ) <= 0) { + /* TODO: Need to find how to reset device + in QUEUE mode properly. + */ + pr_err("Timeout waiting on device startup\n"); + err = -ETIMEDOUT; + goto err2; + } + + /* Set low-power mode. */ + wsm_set_operational_mode(priv, &mode); + + /* Enable multi-TX confirmation */ + wsm_use_multi_tx_conf(priv, true); + +#ifdef CONFIG_CW1200_ETF +skip_fw: +#endif + err = cw1200_register_common(dev); + if (err) + goto err2; + + return err; + +err2: + cw1200_unregister_bh(priv); +err1: + cw1200_free_common(dev); +err: + *core = NULL; + return err; +} +EXPORT_SYMBOL_GPL(cw1200_core_probe); + +void cw1200_core_release(struct cw1200_common *self) +{ + /* Disable device interrupts */ + self->sbus_ops->lock(self->sbus_priv); + __cw1200_irq_enable(self, 0); + self->sbus_ops->unlock(self->sbus_priv); + + /* And then clean up */ + cw1200_unregister_common(self->hw); + cw1200_free_common(self->hw); + return; +} +EXPORT_SYMBOL_GPL(cw1200_core_release); diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c new file mode 100644 index 000000000000..79edfb93b292 --- /dev/null +++ b/drivers/net/wireless/cw1200/pm.c @@ -0,0 +1,367 @@ +/* + * Mac80211 power management API for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include "cw1200.h" +#include "pm.h" +#include "sta.h" +#include "bh.h" +#include "sbus.h" + +#define CW1200_BEACON_SKIPPING_MULTIPLIER 3 + +struct cw1200_udp_port_filter { + struct wsm_udp_port_filter_hdr hdr; + /* Up to 4 filters are allowed. */ + struct wsm_udp_port_filter filters[WSM_MAX_FILTER_ELEMENTS]; +} __packed; + +struct cw1200_ether_type_filter { + struct wsm_ether_type_filter_hdr hdr; + /* Up to 4 filters are allowed. */ + struct wsm_ether_type_filter filters[WSM_MAX_FILTER_ELEMENTS]; +} __packed; + +static struct cw1200_udp_port_filter cw1200_udp_port_filter_on = { + .hdr.num = 2, + .filters = { + [0] = { + .action = WSM_FILTER_ACTION_FILTER_OUT, + .type = WSM_FILTER_PORT_TYPE_DST, + .port = __cpu_to_le16(67), /* DHCP Bootps */ + }, + [1] = { + .action = WSM_FILTER_ACTION_FILTER_OUT, + .type = WSM_FILTER_PORT_TYPE_DST, + .port = __cpu_to_le16(68), /* DHCP Bootpc */ + }, + } +}; + +static struct wsm_udp_port_filter_hdr cw1200_udp_port_filter_off = { + .num = 0, +}; + +#ifndef ETH_P_WAPI +#define ETH_P_WAPI 0x88B4 +#endif + +static struct cw1200_ether_type_filter cw1200_ether_type_filter_on = { + .hdr.num = 4, + .filters = { + [0] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_IP), + }, + [1] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_PAE), + }, + [2] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_WAPI), + }, + [3] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_ARP), + }, + }, +}; + +static struct wsm_ether_type_filter_hdr cw1200_ether_type_filter_off = { + .num = 0, +}; + +/* private */ +struct cw1200_suspend_state { + unsigned long bss_loss_tmo; + unsigned long join_tmo; + unsigned long direct_probe; + unsigned long link_id_gc; + bool beacon_skipping; + u8 prev_ps_mode; +}; + +static void cw1200_pm_stay_awake_tmo(unsigned long arg) +{ + /* XXX what's the point of this ? 
*/ +} + +int cw1200_pm_init(struct cw1200_pm_state *pm, + struct cw1200_common *priv) +{ + spin_lock_init(&pm->lock); + + init_timer(&pm->stay_awake); + pm->stay_awake.data = (unsigned long)pm; + pm->stay_awake.function = cw1200_pm_stay_awake_tmo; + + return 0; +} + +void cw1200_pm_deinit(struct cw1200_pm_state *pm) +{ + del_timer_sync(&pm->stay_awake); +} + +void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, + unsigned long tmo) +{ + long cur_tmo; + spin_lock_bh(&pm->lock); + cur_tmo = pm->stay_awake.expires - jiffies; + if (!timer_pending(&pm->stay_awake) || cur_tmo < (long)tmo) + mod_timer(&pm->stay_awake, jiffies + tmo); + spin_unlock_bh(&pm->lock); +} + +static long cw1200_suspend_work(struct delayed_work *work) +{ + int ret = cancel_delayed_work(work); + long tmo; + if (ret > 0) { + /* Timer is pending */ + tmo = work->timer.expires - jiffies; + if (tmo < 0) + tmo = 0; + } else { + tmo = -1; + } + return tmo; +} + +static int cw1200_resume_work(struct cw1200_common *priv, + struct delayed_work *work, + unsigned long tmo) +{ + if ((long)tmo < 0) + return 1; + + return queue_delayed_work(priv->workqueue, work, tmo); +} + +int cw1200_can_suspend(struct cw1200_common *priv) +{ + if (atomic_read(&priv->bh_rx)) { + wiphy_dbg(priv->hw->wiphy, "Suspend interrupted.\n"); + return 0; + } + return 1; +} +EXPORT_SYMBOL_GPL(cw1200_can_suspend); + +int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_pm_state *pm_state = &priv->pm_state; + struct cw1200_suspend_state *state; + int ret; + + spin_lock_bh(&pm_state->lock); + ret = timer_pending(&pm_state->stay_awake); + spin_unlock_bh(&pm_state->lock); + if (ret) + return -EAGAIN; + + /* Do not suspend when datapath is not idle */ + if (priv->tx_queue_stats.num_queued) + return -EBUSY; + + /* Make sure there is no configuration requests in progress. */ + if (!mutex_trylock(&priv->conf_mutex)) + return -EBUSY; + + /* Ensure pending operations are done. + * Note also that wow_suspend must return in ~2.5sec, before + * watchdog is triggered. + */ + if (priv->channel_switch_in_progress) + goto revert1; + + /* Do not suspend when join is pending */ + if (priv->join_pending) + goto revert1; + + /* Do not suspend when scanning */ + if (down_trylock(&priv->scan.lock)) + goto revert1; + + /* Lock TX. */ + wsm_lock_tx_async(priv); + + /* Wait to avoid possible race with bh code. + * But do not wait too long... + */ + if (wait_event_timeout(priv->bh_evt_wq, + !priv->hw_bufs_used, HZ / 10) <= 0) + goto revert2; + + /* Set UDP filter */ + wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_on.hdr); + + /* Set ethernet frame type filter */ + wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_on.hdr); + + /* Allocate state */ + state = kzalloc(sizeof(struct cw1200_suspend_state), GFP_KERNEL); + if (!state) + goto revert3; + + /* Change to legacy PS while going to suspend */ + if (!priv->vif->p2p && + priv->join_status == CW1200_JOIN_STATUS_STA && + priv->powersave_mode.mode != WSM_PSM_PS) { + state->prev_ps_mode = priv->powersave_mode.mode; + priv->powersave_mode.mode = WSM_PSM_PS; + cw1200_set_pm(priv, &priv->powersave_mode); + if (wait_event_interruptible_timeout(priv->ps_mode_switch_done, + !priv->ps_mode_switch_in_progress, 1*HZ) <= 0) { + goto revert3; + } + } + + /* Store delayed work states. 
*/ + state->bss_loss_tmo = + cw1200_suspend_work(&priv->bss_loss_work); + state->join_tmo = + cw1200_suspend_work(&priv->join_timeout); + state->direct_probe = + cw1200_suspend_work(&priv->scan.probe_work); + state->link_id_gc = + cw1200_suspend_work(&priv->link_id_gc_work); + + cancel_delayed_work_sync(&priv->clear_recent_scan_work); + atomic_set(&priv->recent_scan, 0); + + /* Enable beacon skipping */ + if (priv->join_status == CW1200_JOIN_STATUS_STA && + priv->join_dtim_period && + !priv->has_multicast_subscription) { + state->beacon_skipping = true; + wsm_set_beacon_wakeup_period(priv, + priv->join_dtim_period, + CW1200_BEACON_SKIPPING_MULTIPLIER * priv->join_dtim_period); + } + + /* Stop serving thread */ + if (cw1200_bh_suspend(priv)) + goto revert4; + + ret = timer_pending(&priv->mcast_timeout); + if (ret) + goto revert5; + + /* Store suspend state */ + pm_state->suspend_state = state; + + /* Enable IRQ wake */ + ret = priv->sbus_ops->power_mgmt(priv->sbus_priv, true); + if (ret) { + wiphy_err(priv->hw->wiphy, + "PM request failed: %d. WoW is disabled.\n", ret); + cw1200_wow_resume(hw); + return -EBUSY; + } + + /* Force resume if event is coming from the device. */ + if (atomic_read(&priv->bh_rx)) { + cw1200_wow_resume(hw); + return -EAGAIN; + } + + return 0; + +revert5: + WARN_ON(cw1200_bh_resume(priv)); +revert4: + cw1200_resume_work(priv, &priv->bss_loss_work, + state->bss_loss_tmo); + cw1200_resume_work(priv, &priv->join_timeout, + state->join_tmo); + cw1200_resume_work(priv, &priv->scan.probe_work, + state->direct_probe); + cw1200_resume_work(priv, &priv->link_id_gc_work, + state->link_id_gc); + kfree(state); +revert3: + wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off); + wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off); +revert2: + wsm_unlock_tx(priv); + up(&priv->scan.lock); +revert1: + mutex_unlock(&priv->conf_mutex); + return -EBUSY; +} + +int cw1200_wow_resume(struct ieee80211_hw *hw) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_pm_state *pm_state = &priv->pm_state; + struct cw1200_suspend_state *state; + + state = pm_state->suspend_state; + pm_state->suspend_state = NULL; + + /* Disable IRQ wake */ + priv->sbus_ops->power_mgmt(priv->sbus_priv, false); + + /* Scan.lock must be released before BH is resumed other way + * in case when BSS_LOST command arrived the processing of the + * command will be delayed. + */ + up(&priv->scan.lock); + + /* Resume BH thread */ + WARN_ON(cw1200_bh_resume(priv)); + + /* Restores previous PS mode */ + if (!priv->vif->p2p && priv->join_status == CW1200_JOIN_STATUS_STA) { + priv->powersave_mode.mode = state->prev_ps_mode; + cw1200_set_pm(priv, &priv->powersave_mode); + } + + if (state->beacon_skipping) { + wsm_set_beacon_wakeup_period(priv, priv->beacon_int * + priv->join_dtim_period > + MAX_BEACON_SKIP_TIME_MS ? 
1 : + priv->join_dtim_period, 0); + state->beacon_skipping = false; + } + + /* Resume delayed work */ + cw1200_resume_work(priv, &priv->bss_loss_work, + state->bss_loss_tmo); + cw1200_resume_work(priv, &priv->join_timeout, + state->join_tmo); + cw1200_resume_work(priv, &priv->scan.probe_work, + state->direct_probe); + cw1200_resume_work(priv, &priv->link_id_gc_work, + state->link_id_gc); + + /* Remove UDP port filter */ + wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off); + + /* Remove ethernet frame type filter */ + wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off); + + /* Unlock datapath */ + wsm_unlock_tx(priv); + + /* Unlock configuration mutex */ + mutex_unlock(&priv->conf_mutex); + + /* Free memory */ + kfree(state); + + return 0; +} diff --git a/drivers/net/wireless/cw1200/pm.h b/drivers/net/wireless/cw1200/pm.h new file mode 100644 index 000000000000..516d9671dd1e --- /dev/null +++ b/drivers/net/wireless/cw1200/pm.h @@ -0,0 +1,38 @@ +/* + * Mac80211 power management interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef PM_H_INCLUDED +#define PM_H_INCLUDED + +/* ******************************************************************** */ +/* mac80211 API */ + +/* extern */ struct cw1200_common; +/* private */ struct cw1200_suspend_state; + +struct cw1200_pm_state { + struct cw1200_suspend_state *suspend_state; + struct timer_list stay_awake; + struct platform_device *pm_dev; + spinlock_t lock; /* Protect access */ +}; + +int cw1200_pm_init(struct cw1200_pm_state *pm, + struct cw1200_common *priv); +void cw1200_pm_deinit(struct cw1200_pm_state *pm); +void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, + unsigned long tmo); +int cw1200_wow_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan); +int cw1200_wow_resume(struct ieee80211_hw *hw); +int cw1200_can_suspend(struct cw1200_common *priv); + +#endif diff --git a/drivers/net/wireless/cw1200/queue.c b/drivers/net/wireless/cw1200/queue.c new file mode 100644 index 000000000000..8510454d5db1 --- /dev/null +++ b/drivers/net/wireless/cw1200/queue.c @@ -0,0 +1,583 @@ +/* + * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include "queue.h" +#include "cw1200.h" +#include "debug.h" + +/* private */ struct cw1200_queue_item +{ + struct list_head head; + struct sk_buff *skb; + u32 packet_id; + unsigned long queue_timestamp; + unsigned long xmit_timestamp; + struct cw1200_txpriv txpriv; + u8 generation; +}; + +static inline void __cw1200_queue_lock(struct cw1200_queue *queue) +{ + struct cw1200_queue_stats *stats = queue->stats; + if (queue->tx_locked_cnt++ == 0) { + pr_debug("[TX] Queue %d is locked.\n", + queue->queue_id); + ieee80211_stop_queue(stats->priv->hw, queue->queue_id); + } +} + +static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) +{ + struct cw1200_queue_stats *stats = queue->stats; + BUG_ON(!queue->tx_locked_cnt); + if (--queue->tx_locked_cnt == 0) { + pr_debug("[TX] Queue %d is unlocked.\n", + queue->queue_id); + ieee80211_wake_queue(stats->priv->hw, queue->queue_id); + } +} + +static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation, + u8 *queue_id, u8 *item_generation, + u8 *item_id) +{ + *item_id = (packet_id >> 0) & 0xFF; + *item_generation = (packet_id >> 8) & 0xFF; + *queue_id = (packet_id >> 16) & 0xFF; + *queue_generation = (packet_id >> 24) & 0xFF; +} + +static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id, + u8 item_generation, u8 item_id) +{ + return ((u32)item_id << 0) | + ((u32)item_generation << 8) | + ((u32)queue_id << 16) | + ((u32)queue_generation << 24); +} + +static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats, + struct list_head *gc_list) +{ + struct cw1200_queue_item *item, *tmp; + + list_for_each_entry_safe(item, tmp, gc_list, head) { + list_del(&item->head); + stats->skb_dtor(stats->priv, item->skb, &item->txpriv); + kfree(item); + } +} + +static void cw1200_queue_register_post_gc(struct list_head *gc_list, + struct cw1200_queue_item *item) +{ + struct cw1200_queue_item *gc_item; + gc_item = kmalloc(sizeof(struct cw1200_queue_item), + GFP_ATOMIC); + BUG_ON(!gc_item); + memcpy(gc_item, item, sizeof(struct cw1200_queue_item)); + list_add_tail(&gc_item->head, gc_list); +} + +static void __cw1200_queue_gc(struct cw1200_queue *queue, + struct list_head *head, + bool unlock) +{ + struct cw1200_queue_stats *stats = queue->stats; + struct cw1200_queue_item *item = NULL, *tmp; + bool wakeup_stats = false; + + list_for_each_entry_safe(item, tmp, &queue->queue, head) { + if (jiffies - item->queue_timestamp < queue->ttl) + break; + --queue->num_queued; + --queue->link_map_cache[item->txpriv.link_id]; + spin_lock_bh(&stats->lock); + --stats->num_queued; + if (!--stats->link_map_cache[item->txpriv.link_id]) + wakeup_stats = true; + spin_unlock_bh(&stats->lock); + cw1200_debug_tx_ttl(stats->priv); + cw1200_queue_register_post_gc(head, item); + item->skb = NULL; + list_move_tail(&item->head, &queue->free_pool); + } + + if (wakeup_stats) + wake_up(&stats->wait_link_id_empty); + + if (queue->overfull) { + if (queue->num_queued <= (queue->capacity >> 1)) { + queue->overfull = false; + if (unlock) + __cw1200_queue_unlock(queue); + } else if (item) { + unsigned long tmo = item->queue_timestamp + queue->ttl; + mod_timer(&queue->gc, tmo); + cw1200_pm_stay_awake(&stats->priv->pm_state, + tmo - jiffies); + } + } +} + +static void cw1200_queue_gc(unsigned long arg) +{ + LIST_HEAD(list); + struct cw1200_queue *queue = + (struct cw1200_queue *)arg; + + spin_lock_bh(&queue->lock); + __cw1200_queue_gc(queue, &list, true); + spin_unlock_bh(&queue->lock); + cw1200_queue_post_gc(queue->stats, &list); +} + 
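Editorial aside: the packet_id built by cw1200_queue_mk_packet_id() above packs four 8-bit fields, and cw1200_queue_parse_id(), cw1200_queue_get_queue_id() and cw1200_queue_get_generation() simply mask them back out. A minimal stand-alone sketch of that layout (user-space C; the mk_packet_id() helper below is illustrative only and not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Mirrors cw1200_queue_mk_packet_id(): |queue_gen|queue_id|item_gen|item_id| */
static uint32_t mk_packet_id(uint8_t queue_gen, uint8_t queue_id,
			     uint8_t item_gen, uint8_t item_id)
{
	return ((uint32_t)item_id << 0) |
	       ((uint32_t)item_gen << 8) |
	       ((uint32_t)queue_id << 16) |
	       ((uint32_t)queue_gen << 24);
}

int main(void)
{
	/* e.g. generation 1 of queue 3, first generation of pool item 42 */
	uint32_t id = mk_packet_id(1, 3, 0, 42);

	printf("packet_id=0x%08x queue_id=%u item_id=%u\n",
	       id,
	       (id >> 16) & 0xff,	/* same mask as cw1200_queue_get_queue_id() */
	       (id >> 0) & 0xff);
	return 0;
}

The generation bytes are what let cw1200_queue_requeue() and cw1200_queue_remove() reject a confirmation that refers to a queue or pool slot that has since been reused: a mismatch simply returns -ENOENT.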
+int cw1200_queue_stats_init(struct cw1200_queue_stats *stats, + size_t map_capacity, + cw1200_queue_skb_dtor_t skb_dtor, + struct cw1200_common *priv) +{ + memset(stats, 0, sizeof(*stats)); + stats->map_capacity = map_capacity; + stats->skb_dtor = skb_dtor; + stats->priv = priv; + spin_lock_init(&stats->lock); + init_waitqueue_head(&stats->wait_link_id_empty); + + stats->link_map_cache = kzalloc(sizeof(int) * map_capacity, + GFP_KERNEL); + if (!stats->link_map_cache) + return -ENOMEM; + + return 0; +} + +int cw1200_queue_init(struct cw1200_queue *queue, + struct cw1200_queue_stats *stats, + u8 queue_id, + size_t capacity, + unsigned long ttl) +{ + size_t i; + + memset(queue, 0, sizeof(*queue)); + queue->stats = stats; + queue->capacity = capacity; + queue->queue_id = queue_id; + queue->ttl = ttl; + INIT_LIST_HEAD(&queue->queue); + INIT_LIST_HEAD(&queue->pending); + INIT_LIST_HEAD(&queue->free_pool); + spin_lock_init(&queue->lock); + init_timer(&queue->gc); + queue->gc.data = (unsigned long)queue; + queue->gc.function = cw1200_queue_gc; + + queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity, + GFP_KERNEL); + if (!queue->pool) + return -ENOMEM; + + queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity, + GFP_KERNEL); + if (!queue->link_map_cache) { + kfree(queue->pool); + queue->pool = NULL; + return -ENOMEM; + } + + for (i = 0; i < capacity; ++i) + list_add_tail(&queue->pool[i].head, &queue->free_pool); + + return 0; +} + +int cw1200_queue_clear(struct cw1200_queue *queue) +{ + int i; + LIST_HEAD(gc_list); + struct cw1200_queue_stats *stats = queue->stats; + struct cw1200_queue_item *item, *tmp; + + spin_lock_bh(&queue->lock); + queue->generation++; + list_splice_tail_init(&queue->queue, &queue->pending); + list_for_each_entry_safe(item, tmp, &queue->pending, head) { + WARN_ON(!item->skb); + cw1200_queue_register_post_gc(&gc_list, item); + item->skb = NULL; + list_move_tail(&item->head, &queue->free_pool); + } + queue->num_queued = 0; + queue->num_pending = 0; + + spin_lock_bh(&stats->lock); + for (i = 0; i < stats->map_capacity; ++i) { + stats->num_queued -= queue->link_map_cache[i]; + stats->link_map_cache[i] -= queue->link_map_cache[i]; + queue->link_map_cache[i] = 0; + } + spin_unlock_bh(&stats->lock); + if (queue->overfull) { + queue->overfull = false; + __cw1200_queue_unlock(queue); + } + spin_unlock_bh(&queue->lock); + wake_up(&stats->wait_link_id_empty); + cw1200_queue_post_gc(stats, &gc_list); + return 0; +} + +void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats) +{ + kfree(stats->link_map_cache); + stats->link_map_cache = NULL; +} + +void cw1200_queue_deinit(struct cw1200_queue *queue) +{ + cw1200_queue_clear(queue); + del_timer_sync(&queue->gc); + INIT_LIST_HEAD(&queue->free_pool); + kfree(queue->pool); + kfree(queue->link_map_cache); + queue->pool = NULL; + queue->link_map_cache = NULL; + queue->capacity = 0; +} + +size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, + u32 link_id_map) +{ + size_t ret; + int i, bit; + size_t map_capacity = queue->stats->map_capacity; + + if (!link_id_map) + return 0; + + spin_lock_bh(&queue->lock); + if (link_id_map == (u32)-1) { + ret = queue->num_queued - queue->num_pending; + } else { + ret = 0; + for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) { + if (link_id_map & bit) + ret += queue->link_map_cache[i]; + } + } + spin_unlock_bh(&queue->lock); + return ret; +} + +int cw1200_queue_put(struct cw1200_queue *queue, + struct sk_buff *skb, + struct cw1200_txpriv *txpriv) +{ + int ret = 0; + 
LIST_HEAD(gc_list); + struct cw1200_queue_stats *stats = queue->stats; + + if (txpriv->link_id >= queue->stats->map_capacity) + return -EINVAL; + + spin_lock_bh(&queue->lock); + if (!WARN_ON(list_empty(&queue->free_pool))) { + struct cw1200_queue_item *item = list_first_entry( + &queue->free_pool, struct cw1200_queue_item, head); + BUG_ON(item->skb); + + list_move_tail(&item->head, &queue->queue); + item->skb = skb; + item->txpriv = *txpriv; + item->generation = 0; + item->packet_id = cw1200_queue_mk_packet_id(queue->generation, + queue->queue_id, + item->generation, + item - queue->pool); + item->queue_timestamp = jiffies; + + ++queue->num_queued; + ++queue->link_map_cache[txpriv->link_id]; + + spin_lock_bh(&stats->lock); + ++stats->num_queued; + ++stats->link_map_cache[txpriv->link_id]; + spin_unlock_bh(&stats->lock); + + /* TX may happen in parallel sometimes. + * Leave extra queue slots so we don't overflow. + */ + if (queue->overfull == false && + queue->num_queued >= + (queue->capacity - (num_present_cpus() - 1))) { + queue->overfull = true; + __cw1200_queue_lock(queue); + mod_timer(&queue->gc, jiffies); + } + } else { + ret = -ENOENT; + } + spin_unlock_bh(&queue->lock); + return ret; +} + +int cw1200_queue_get(struct cw1200_queue *queue, + u32 link_id_map, + struct wsm_tx **tx, + struct ieee80211_tx_info **tx_info, + const struct cw1200_txpriv **txpriv) +{ + int ret = -ENOENT; + struct cw1200_queue_item *item; + struct cw1200_queue_stats *stats = queue->stats; + bool wakeup_stats = false; + + spin_lock_bh(&queue->lock); + list_for_each_entry(item, &queue->queue, head) { + if (link_id_map & BIT(item->txpriv.link_id)) { + ret = 0; + break; + } + } + + if (!WARN_ON(ret)) { + *tx = (struct wsm_tx *)item->skb->data; + *tx_info = IEEE80211_SKB_CB(item->skb); + *txpriv = &item->txpriv; + (*tx)->packet_id = __cpu_to_le32(item->packet_id); + list_move_tail(&item->head, &queue->pending); + ++queue->num_pending; + --queue->link_map_cache[item->txpriv.link_id]; + item->xmit_timestamp = jiffies; + + spin_lock_bh(&stats->lock); + --stats->num_queued; + if (!--stats->link_map_cache[item->txpriv.link_id]) + wakeup_stats = true; + spin_unlock_bh(&stats->lock); + } + spin_unlock_bh(&queue->lock); + if (wakeup_stats) + wake_up(&stats->wait_link_id_empty); + return ret; +} + +int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id) +{ + int ret = 0; + u8 queue_generation, queue_id, item_generation, item_id; + struct cw1200_queue_item *item; + struct cw1200_queue_stats *stats = queue->stats; + + cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, + &item_generation, &item_id); + + item = &queue->pool[item_id]; + + spin_lock_bh(&queue->lock); + BUG_ON(queue_id != queue->queue_id); + if (queue_generation != queue->generation) { + ret = -ENOENT; + } else if (item_id >= (unsigned) queue->capacity) { + WARN_ON(1); + ret = -EINVAL; + } else if (item->generation != item_generation) { + WARN_ON(1); + ret = -ENOENT; + } else { + --queue->num_pending; + ++queue->link_map_cache[item->txpriv.link_id]; + + spin_lock_bh(&stats->lock); + ++stats->num_queued; + ++stats->link_map_cache[item->txpriv.link_id]; + spin_unlock_bh(&stats->lock); + + item->generation = ++item_generation; + item->packet_id = cw1200_queue_mk_packet_id(queue_generation, + queue_id, + item_generation, + item_id); + list_move(&item->head, &queue->queue); + } + spin_unlock_bh(&queue->lock); + return ret; +} + +int cw1200_queue_requeue_all(struct cw1200_queue *queue) +{ + struct cw1200_queue_item *item, *tmp; + struct 
cw1200_queue_stats *stats = queue->stats; + spin_lock_bh(&queue->lock); + + list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) { + --queue->num_pending; + ++queue->link_map_cache[item->txpriv.link_id]; + + spin_lock_bh(&stats->lock); + ++stats->num_queued; + ++stats->link_map_cache[item->txpriv.link_id]; + spin_unlock_bh(&stats->lock); + + ++item->generation; + item->packet_id = cw1200_queue_mk_packet_id(queue->generation, + queue->queue_id, + item->generation, + item - queue->pool); + list_move(&item->head, &queue->queue); + } + spin_unlock_bh(&queue->lock); + + return 0; +} + +int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id) +{ + int ret = 0; + u8 queue_generation, queue_id, item_generation, item_id; + struct cw1200_queue_item *item; + struct cw1200_queue_stats *stats = queue->stats; + struct sk_buff *gc_skb = NULL; + struct cw1200_txpriv gc_txpriv; + + cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, + &item_generation, &item_id); + + item = &queue->pool[item_id]; + + spin_lock_bh(&queue->lock); + BUG_ON(queue_id != queue->queue_id); + if (queue_generation != queue->generation) { + ret = -ENOENT; + } else if (item_id >= (unsigned) queue->capacity) { + WARN_ON(1); + ret = -EINVAL; + } else if (item->generation != item_generation) { + WARN_ON(1); + ret = -ENOENT; + } else { + gc_txpriv = item->txpriv; + gc_skb = item->skb; + item->skb = NULL; + --queue->num_pending; + --queue->num_queued; + ++queue->num_sent; + ++item->generation; + /* Do not use list_move_tail here, but list_move: + * try to utilize cache row. + */ + list_move(&item->head, &queue->free_pool); + + if (queue->overfull && + (queue->num_queued <= (queue->capacity >> 1))) { + queue->overfull = false; + __cw1200_queue_unlock(queue); + } + } + spin_unlock_bh(&queue->lock); + + if (gc_skb) + stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv); + + return ret; +} + +int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, + struct sk_buff **skb, + const struct cw1200_txpriv **txpriv) +{ + int ret = 0; + u8 queue_generation, queue_id, item_generation, item_id; + struct cw1200_queue_item *item; + cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, + &item_generation, &item_id); + + item = &queue->pool[item_id]; + + spin_lock_bh(&queue->lock); + BUG_ON(queue_id != queue->queue_id); + if (queue_generation != queue->generation) { + ret = -ENOENT; + } else if (item_id >= (unsigned) queue->capacity) { + WARN_ON(1); + ret = -EINVAL; + } else if (item->generation != item_generation) { + WARN_ON(1); + ret = -ENOENT; + } else { + *skb = item->skb; + *txpriv = &item->txpriv; + } + spin_unlock_bh(&queue->lock); + return ret; +} + +void cw1200_queue_lock(struct cw1200_queue *queue) +{ + spin_lock_bh(&queue->lock); + __cw1200_queue_lock(queue); + spin_unlock_bh(&queue->lock); +} + +void cw1200_queue_unlock(struct cw1200_queue *queue) +{ + spin_lock_bh(&queue->lock); + __cw1200_queue_unlock(queue); + spin_unlock_bh(&queue->lock); +} + +bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue, + unsigned long *timestamp, + u32 pending_frame_id) +{ + struct cw1200_queue_item *item; + bool ret; + + spin_lock_bh(&queue->lock); + ret = !list_empty(&queue->pending); + if (ret) { + list_for_each_entry(item, &queue->pending, head) { + if (item->packet_id != pending_frame_id) + if (time_before(item->xmit_timestamp, + *timestamp)) + *timestamp = item->xmit_timestamp; + } + } + spin_unlock_bh(&queue->lock); + return ret; +} + +bool cw1200_queue_stats_is_empty(struct 
cw1200_queue_stats *stats, + u32 link_id_map) +{ + bool empty = true; + + spin_lock_bh(&stats->lock); + if (link_id_map == (u32)-1) { + empty = stats->num_queued == 0; + } else { + int i; + for (i = 0; i < stats->map_capacity; ++i) { + if (link_id_map & BIT(i)) { + if (stats->link_map_cache[i]) { + empty = false; + break; + } + } + } + } + spin_unlock_bh(&stats->lock); + + return empty; +} diff --git a/drivers/net/wireless/cw1200/queue.h b/drivers/net/wireless/cw1200/queue.h new file mode 100644 index 000000000000..119f9c79c14e --- /dev/null +++ b/drivers/net/wireless/cw1200/queue.h @@ -0,0 +1,116 @@ +/* + * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_QUEUE_H_INCLUDED +#define CW1200_QUEUE_H_INCLUDED + +/* private */ struct cw1200_queue_item; + +/* extern */ struct sk_buff; +/* extern */ struct wsm_tx; +/* extern */ struct cw1200_common; +/* extern */ struct ieee80211_tx_queue_stats; +/* extern */ struct cw1200_txpriv; + +/* forward */ struct cw1200_queue_stats; + +typedef void (*cw1200_queue_skb_dtor_t)(struct cw1200_common *priv, + struct sk_buff *skb, + const struct cw1200_txpriv *txpriv); + +struct cw1200_queue { + struct cw1200_queue_stats *stats; + size_t capacity; + size_t num_queued; + size_t num_pending; + size_t num_sent; + struct cw1200_queue_item *pool; + struct list_head queue; + struct list_head free_pool; + struct list_head pending; + int tx_locked_cnt; + int *link_map_cache; + bool overfull; + spinlock_t lock; /* Protect queue entry */ + u8 queue_id; + u8 generation; + struct timer_list gc; + unsigned long ttl; +}; + +struct cw1200_queue_stats { + spinlock_t lock; /* Protect stats entry */ + int *link_map_cache; + int num_queued; + size_t map_capacity; + wait_queue_head_t wait_link_id_empty; + cw1200_queue_skb_dtor_t skb_dtor; + struct cw1200_common *priv; +}; + +struct cw1200_txpriv { + u8 link_id; + u8 raw_link_id; + u8 tid; + u8 rate_id; + u8 offset; +}; + +int cw1200_queue_stats_init(struct cw1200_queue_stats *stats, + size_t map_capacity, + cw1200_queue_skb_dtor_t skb_dtor, + struct cw1200_common *priv); +int cw1200_queue_init(struct cw1200_queue *queue, + struct cw1200_queue_stats *stats, + u8 queue_id, + size_t capacity, + unsigned long ttl); +int cw1200_queue_clear(struct cw1200_queue *queue); +void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats); +void cw1200_queue_deinit(struct cw1200_queue *queue); + +size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, + u32 link_id_map); +int cw1200_queue_put(struct cw1200_queue *queue, + struct sk_buff *skb, + struct cw1200_txpriv *txpriv); +int cw1200_queue_get(struct cw1200_queue *queue, + u32 link_id_map, + struct wsm_tx **tx, + struct ieee80211_tx_info **tx_info, + const struct cw1200_txpriv **txpriv); +int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id); +int cw1200_queue_requeue_all(struct cw1200_queue *queue); +int cw1200_queue_remove(struct cw1200_queue *queue, + u32 packet_id); +int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, + struct sk_buff **skb, + const struct cw1200_txpriv **txpriv); +void cw1200_queue_lock(struct cw1200_queue *queue); +void cw1200_queue_unlock(struct cw1200_queue *queue); +bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue 
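The API declared above is used as a put -> get -> (remove | requeue) cycle keyed by the 32-bit packet id. A condensed, hypothetical walk-through of that cycle, with error handling trimmed:

/* Hypothetical walk-through of the queue API call order; not a literal
 * copy of the driver's TX path.
 */
static void example_tx_cycle(struct cw1200_queue *q, struct sk_buff *skb,
                             struct cw1200_txpriv *txpriv)
{
        struct wsm_tx *wsm;
        struct ieee80211_tx_info *info;
        const struct cw1200_txpriv *tp;

        /* 1. Park the frame handed over by mac80211. */
        if (cw1200_queue_put(q, skb, txpriv))
                return;

        /* 2. Pull the next eligible frame (all link ids allowed here). */
        if (cw1200_queue_get(q, 0xFFFFFFFF, &wsm, &info, &tp))
                return;

        /* 3a. On TX confirmation, release the slot by packet id ... */
        cw1200_queue_remove(q, __le32_to_cpu(wsm->packet_id));

        /* 3b. ... or put it back for a retry:
         * cw1200_queue_requeue(q, __le32_to_cpu(wsm->packet_id));
         */
}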
*queue, + unsigned long *timestamp, + u32 pending_frame_id); + +bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats, + u32 link_id_map); + +static inline u8 cw1200_queue_get_queue_id(u32 packet_id) +{ + return (packet_id >> 16) & 0xFF; +} + +static inline u8 cw1200_queue_get_generation(u32 packet_id) +{ + return (packet_id >> 8) & 0xFF; +} + +#endif /* CW1200_QUEUE_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/sbus.h b/drivers/net/wireless/cw1200/sbus.h new file mode 100644 index 000000000000..603fd25eaa4a --- /dev/null +++ b/drivers/net/wireless/cw1200/sbus.h @@ -0,0 +1,37 @@ +/* + * Common sbus abstraction layer interface for cw1200 wireless driver + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_SBUS_H +#define CW1200_SBUS_H + +/* + * sbus priv forward definition. + * Implemented and instantiated in particular modules. + */ +struct sbus_priv; + +void cw1200_irq_handler(struct cw1200_common *priv); + +/* This MUST be wrapped with sbus_ops->lock/unlock! */ +int __cw1200_irq_enable(struct cw1200_common *priv, int enable); + +struct sbus_ops { + int (*sbus_memcpy_fromio)(struct sbus_priv *self, unsigned int addr, + void *dst, int count); + int (*sbus_memcpy_toio)(struct sbus_priv *self, unsigned int addr, + const void *src, int count); + void (*lock)(struct sbus_priv *self); + void (*unlock)(struct sbus_priv *self); + size_t (*align_size)(struct sbus_priv *self, size_t size); + int (*power_mgmt)(struct sbus_priv *self, bool suspend); +}; + +#endif /* CW1200_SBUS_H */ diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c new file mode 100644 index 000000000000..ee3c19037aac --- /dev/null +++ b/drivers/net/wireless/cw1200/scan.c @@ -0,0 +1,461 @@ +/* + * Scan implementation for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
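sbus.h only declares the bus abstraction; each bus backend supplies its own struct sbus_ops. A bare skeleton of such a backend follows; all names and the 4-byte alignment choice are hypothetical placeholders, not taken from this patch.

/* Hypothetical backend stubs; only the sbus_ops wiring is the point. */
static int example_memcpy_fromio(struct sbus_priv *self, unsigned int addr,
                                 void *dst, int count)     { return 0; }
static int example_memcpy_toio(struct sbus_priv *self, unsigned int addr,
                               const void *src, int count) { return 0; }
static void example_lock(struct sbus_priv *self)   { }
static void example_unlock(struct sbus_priv *self) { }
static size_t example_align_size(struct sbus_priv *self, size_t size)
{
        return ALIGN(size, 4);  /* assumed 32-bit alignment requirement */
}
static int example_power_mgmt(struct sbus_priv *self, bool suspend)
{
        return 0;
}

static struct sbus_ops example_sbus_ops = {
        .sbus_memcpy_fromio = example_memcpy_fromio,
        .sbus_memcpy_toio   = example_memcpy_toio,
        .lock               = example_lock,
        .unlock             = example_unlock,
        .align_size         = example_align_size,
        .power_mgmt         = example_power_mgmt,
};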
+ */ + +#include +#include "cw1200.h" +#include "scan.h" +#include "sta.h" +#include "pm.h" + +static void cw1200_scan_restart_delayed(struct cw1200_common *priv); + +static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan) +{ + int ret, i; + int tmo = 2000; + + switch (priv->join_status) { + case CW1200_JOIN_STATUS_PRE_STA: + case CW1200_JOIN_STATUS_JOINING: + return -EBUSY; + default: + break; + } + + wiphy_dbg(priv->hw->wiphy, "[SCAN] hw req, type %d, %d channels, flags: 0x%x.\n", + scan->type, scan->num_channels, scan->flags); + + for (i = 0; i < scan->num_channels; ++i) + tmo += scan->ch[i].max_chan_time + 10; + + cancel_delayed_work_sync(&priv->clear_recent_scan_work); + atomic_set(&priv->scan.in_progress, 1); + atomic_set(&priv->recent_scan, 1); + cw1200_pm_stay_awake(&priv->pm_state, tmo * HZ / 1000); + queue_delayed_work(priv->workqueue, &priv->scan.timeout, + tmo * HZ / 1000); + ret = wsm_scan(priv, scan); + if (ret) { + atomic_set(&priv->scan.in_progress, 0); + cancel_delayed_work_sync(&priv->scan.timeout); + cw1200_scan_restart_delayed(priv); + } + return ret; +} + +int cw1200_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct cw1200_common *priv = hw->priv; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST, + }; + int i, ret; + + if (!priv->vif) + return -EINVAL; + + /* Scan when P2P_GO corrupt firmware MiniAP mode */ + if (priv->join_status == CW1200_JOIN_STATUS_AP) + return -EOPNOTSUPP; + + if (req->n_ssids == 1 && !req->ssids[0].ssid_len) + req->n_ssids = 0; + + wiphy_dbg(hw->wiphy, "[SCAN] Scan request for %d SSIDs.\n", + req->n_ssids); + + if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS) + return -EINVAL; + + frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0, + req->ie_len); + if (!frame.skb) + return -ENOMEM; + + if (req->ie_len) + memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len); + + /* will be unlocked in cw1200_scan_work() */ + down(&priv->scan.lock); + mutex_lock(&priv->conf_mutex); + + ret = wsm_set_template_frame(priv, &frame); + if (!ret) { + /* Host want to be the probe responder. */ + ret = wsm_set_probe_responder(priv, true); + } + if (ret) { + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); + dev_kfree_skb(frame.skb); + return ret; + } + + wsm_lock_tx(priv); + + BUG_ON(priv->scan.req); + priv->scan.req = req; + priv->scan.n_ssids = 0; + priv->scan.status = 0; + priv->scan.begin = &req->channels[0]; + priv->scan.curr = priv->scan.begin; + priv->scan.end = &req->channels[req->n_channels]; + priv->scan.output_power = priv->output_power; + + for (i = 0; i < req->n_ssids; ++i) { + struct wsm_ssid *dst = &priv->scan.ssids[priv->scan.n_ssids]; + memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid)); + dst->length = req->ssids[i].ssid_len; + ++priv->scan.n_ssids; + } + + mutex_unlock(&priv->conf_mutex); + + if (frame.skb) + dev_kfree_skb(frame.skb); + queue_work(priv->workqueue, &priv->scan.work); + return 0; +} + +void cw1200_scan_work(struct work_struct *work) +{ + struct cw1200_common *priv = container_of(work, struct cw1200_common, + scan.work); + struct ieee80211_channel **it; + struct wsm_scan scan = { + .type = WSM_SCAN_TYPE_FOREGROUND, + .flags = WSM_SCAN_FLAG_SPLIT_METHOD, + }; + bool first_run = (priv->scan.begin == priv->scan.curr && + priv->scan.begin != priv->scan.end); + int i; + + if (first_run) { + /* Firmware gets crazy if scan request is sent + * when STA is joined but not yet associated. 
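The watchdog armed in cw1200_scan_start() is budgeted in milliseconds: a 2000 ms base plus max_chan_time + 10 ms per channel, then converted to jiffies. A worked example, using the 25 ms active-scan dwell programmed further down in cw1200_scan_work():

/* Worked example of the scan watchdog budget, using the 25 ms
 * active-scan dwell programmed later in cw1200_scan_work().
 */
static unsigned long example_scan_watchdog(int num_channels)
{
        int tmo = 2000;                 /* base budget, ms */
        int i;

        for (i = 0; i < num_channels; i++)
                tmo += 25 + 10;         /* max_chan_time + margin */

        /* e.g. 14 channels -> 2000 + 14 * 35 = 2490 ms */
        return msecs_to_jiffies(tmo);   /* effectively tmo * HZ / 1000 */
}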
+ * Force unjoin in this case. + */ + if (cancel_delayed_work_sync(&priv->join_timeout) > 0) + cw1200_join_timeout(&priv->join_timeout.work); + } + + mutex_lock(&priv->conf_mutex); + + if (first_run) { + if (priv->join_status == CW1200_JOIN_STATUS_STA && + !(priv->powersave_mode.mode & WSM_PSM_PS)) { + struct wsm_set_pm pm = priv->powersave_mode; + pm.mode = WSM_PSM_PS; + cw1200_set_pm(priv, &pm); + } else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { + /* FW bug: driver has to restart p2p-dev mode + * after scan + */ + cw1200_disable_listening(priv); + } + } + + if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) { + if (priv->scan.output_power != priv->output_power) + wsm_set_output_power(priv, priv->output_power * 10); + if (priv->join_status == CW1200_JOIN_STATUS_STA && + !(priv->powersave_mode.mode & WSM_PSM_PS)) + cw1200_set_pm(priv, &priv->powersave_mode); + + if (priv->scan.status < 0) + wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n", + priv->scan.status); + else if (priv->scan.req) + wiphy_dbg(priv->hw->wiphy, + "[SCAN] Scan completed.\n"); + else + wiphy_dbg(priv->hw->wiphy, + "[SCAN] Scan canceled.\n"); + + priv->scan.req = NULL; + cw1200_scan_restart_delayed(priv); + wsm_unlock_tx(priv); + mutex_unlock(&priv->conf_mutex); + ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0); + up(&priv->scan.lock); + return; + } else { + struct ieee80211_channel *first = *priv->scan.curr; + for (it = priv->scan.curr + 1, i = 1; + it != priv->scan.end && i < WSM_SCAN_MAX_NUM_OF_CHANNELS; + ++it, ++i) { + if ((*it)->band != first->band) + break; + if (((*it)->flags ^ first->flags) & + IEEE80211_CHAN_PASSIVE_SCAN) + break; + if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) && + (*it)->max_power != first->max_power) + break; + } + scan.band = first->band; + + if (priv->scan.req->no_cck) + scan.max_tx_rate = WSM_TRANSMIT_RATE_6; + else + scan.max_tx_rate = WSM_TRANSMIT_RATE_1; + scan.num_probes = + (first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2; + scan.num_ssids = priv->scan.n_ssids; + scan.ssids = &priv->scan.ssids[0]; + scan.num_channels = it - priv->scan.curr; + /* TODO: Is it optimal? */ + scan.probe_delay = 100; + /* It is not stated in WSM specification, however + * FW team says that driver may not use FG scan + * when joined. 
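The channel-slicing loop above can be read as a single compatibility test against the first channel of the batch. Restated as a helper (a sketch, not code from the patch):

/* Sketch of the grouping condition used when slicing the cfg80211
 * channel list into individual WSM scan requests.
 */
static bool example_same_scan_batch(const struct ieee80211_channel *first,
                                    const struct ieee80211_channel *chan)
{
        if (chan->band != first->band)
                return false;
        /* passive and active channels never share a request */
        if ((chan->flags ^ first->flags) & IEEE80211_CHAN_PASSIVE_SCAN)
                return false;
        /* active channels must also agree on TX power */
        if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
            chan->max_power != first->max_power)
                return false;
        return true;
}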
+ */ + if (priv->join_status == CW1200_JOIN_STATUS_STA) { + scan.type = WSM_SCAN_TYPE_BACKGROUND; + scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND; + } + scan.ch = kzalloc( + sizeof(struct wsm_scan_ch) * (it - priv->scan.curr), + GFP_KERNEL); + if (!scan.ch) { + priv->scan.status = -ENOMEM; + goto fail; + } + for (i = 0; i < scan.num_channels; ++i) { + scan.ch[i].number = priv->scan.curr[i]->hw_value; + if (priv->scan.curr[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) { + scan.ch[i].min_chan_time = 50; + scan.ch[i].max_chan_time = 100; + } else { + scan.ch[i].min_chan_time = 10; + scan.ch[i].max_chan_time = 25; + } + } + if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) && + priv->scan.output_power != first->max_power) { + priv->scan.output_power = first->max_power; + wsm_set_output_power(priv, + priv->scan.output_power * 10); + } + priv->scan.status = cw1200_scan_start(priv, &scan); + kfree(scan.ch); + if (priv->scan.status) + goto fail; + priv->scan.curr = it; + } + mutex_unlock(&priv->conf_mutex); + return; + +fail: + priv->scan.curr = priv->scan.end; + mutex_unlock(&priv->conf_mutex); + queue_work(priv->workqueue, &priv->scan.work); + return; +} + +static void cw1200_scan_restart_delayed(struct cw1200_common *priv) +{ + /* FW bug: driver has to restart p2p-dev mode after scan. */ + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { + cw1200_enable_listening(priv); + cw1200_update_filtering(priv); + } + + if (priv->delayed_unjoin) { + priv->delayed_unjoin = false; + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } else if (priv->delayed_link_loss) { + wiphy_dbg(priv->hw->wiphy, "[CQM] Requeue BSS loss.\n"); + priv->delayed_link_loss = 0; + cw1200_cqm_bssloss_sm(priv, 1, 0, 0); + } +} + +static void cw1200_scan_complete(struct cw1200_common *priv) +{ + queue_delayed_work(priv->workqueue, &priv->clear_recent_scan_work, HZ); + if (priv->scan.direct_probe) { + wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe complete.\n"); + cw1200_scan_restart_delayed(priv); + priv->scan.direct_probe = 0; + up(&priv->scan.lock); + wsm_unlock_tx(priv); + } else { + cw1200_scan_work(&priv->scan.work); + } +} + +void cw1200_scan_failed_cb(struct cw1200_common *priv) +{ + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + /* STA is stopped. */ + return; + + if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) { + priv->scan.status = -EIO; + queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0); + } +} + + +void cw1200_scan_complete_cb(struct cw1200_common *priv, + struct wsm_scan_complete *arg) +{ + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + /* STA is stopped. 
*/ + return; + + if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) { + priv->scan.status = 1; + queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0); + } +} + +void cw1200_clear_recent_scan_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, + clear_recent_scan_work.work); + atomic_xchg(&priv->recent_scan, 0); +} + +void cw1200_scan_timeout(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, scan.timeout.work); + if (atomic_xchg(&priv->scan.in_progress, 0)) { + if (priv->scan.status > 0) { + priv->scan.status = 0; + } else if (!priv->scan.status) { + wiphy_warn(priv->hw->wiphy, + "Timeout waiting for scan complete notification.\n"); + priv->scan.status = -ETIMEDOUT; + priv->scan.curr = priv->scan.end; + wsm_stop_scan(priv); + } + cw1200_scan_complete(priv); + } +} + +void cw1200_probe_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, scan.probe_work.work); + u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id); + struct cw1200_queue *queue = &priv->tx_queue[queue_id]; + const struct cw1200_txpriv *txpriv; + struct wsm_tx *wsm; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST, + }; + struct wsm_ssid ssids[1] = {{ + .length = 0, + } }; + struct wsm_scan_ch ch[1] = {{ + .min_chan_time = 0, + .max_chan_time = 10, + } }; + struct wsm_scan scan = { + .type = WSM_SCAN_TYPE_FOREGROUND, + .num_probes = 1, + .probe_delay = 0, + .num_channels = 1, + .ssids = ssids, + .ch = ch, + }; + u8 *ies; + size_t ies_len; + int ret; + + wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe work.\n"); + + mutex_lock(&priv->conf_mutex); + if (down_trylock(&priv->scan.lock)) { + /* Scan is already in progress. Requeue self. */ + schedule(); + queue_delayed_work(priv->workqueue, + &priv->scan.probe_work, HZ / 10); + mutex_unlock(&priv->conf_mutex); + return; + } + + /* Make sure we still have a pending probe req */ + if (cw1200_queue_get_skb(queue, priv->pending_frame_id, + &frame.skb, &txpriv)) { + up(&priv->scan.lock); + mutex_unlock(&priv->conf_mutex); + wsm_unlock_tx(priv); + return; + } + wsm = (struct wsm_tx *)frame.skb->data; + scan.max_tx_rate = wsm->max_tx_rate; + scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? + WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; + if (priv->join_status == CW1200_JOIN_STATUS_STA || + priv->join_status == CW1200_JOIN_STATUS_IBSS) { + scan.type = WSM_SCAN_TYPE_BACKGROUND; + scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND; + } + ch[0].number = priv->channel->hw_value; + + skb_pull(frame.skb, txpriv->offset); + + ies = &frame.skb->data[sizeof(struct ieee80211_hdr_3addr)]; + ies_len = frame.skb->len - sizeof(struct ieee80211_hdr_3addr); + + if (ies_len) { + u8 *ssidie = + (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len); + if (ssidie && ssidie[1] && ssidie[1] <= sizeof(ssids[0].ssid)) { + u8 *nextie = &ssidie[2 + ssidie[1]]; + /* Remove SSID from the IE list. 
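The ssidie[] indexing here relies on the standard information-element encoding returned by cfg80211_find_ie(): the pointer addresses the element header, so byte 0 is the element id, byte 1 the payload length, and the payload starts at byte 2. A minimal illustration:

/* Illustration of the ssidie[] indexing: cfg80211_find_ie() returns a
 * pointer to the element header, not to its payload.
 */
static void example_dump_ssid_ie(const u8 *ies, int ies_len)
{
        const u8 *ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len);

        if (!ssidie)
                return;
        pr_debug("SSID IE: id=%d len=%d payload starts at ssidie[2]\n",
                 ssidie[0], ssidie[1]);
}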
It has to be provided + * as a separate argument in cw1200_scan_start call + */ + + /* Store SSID localy */ + ssids[0].length = ssidie[1]; + memcpy(ssids[0].ssid, &ssidie[2], ssids[0].length); + scan.num_ssids = 1; + + /* Remove SSID from IE list */ + ssidie[1] = 0; + memmove(&ssidie[2], nextie, &ies[ies_len] - nextie); + skb_trim(frame.skb, frame.skb->len - ssids[0].length); + } + } + + /* FW bug: driver has to restart p2p-dev mode after scan */ + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) + cw1200_disable_listening(priv); + ret = wsm_set_template_frame(priv, &frame); + priv->scan.direct_probe = 1; + if (!ret) { + wsm_flush_tx(priv); + ret = cw1200_scan_start(priv, &scan); + } + mutex_unlock(&priv->conf_mutex); + + skb_push(frame.skb, txpriv->offset); + if (!ret) + IEEE80211_SKB_CB(frame.skb)->flags |= IEEE80211_TX_STAT_ACK; + BUG_ON(cw1200_queue_remove(queue, priv->pending_frame_id)); + + if (ret) { + priv->scan.direct_probe = 0; + up(&priv->scan.lock); + wsm_unlock_tx(priv); + } + + return; +} diff --git a/drivers/net/wireless/cw1200/scan.h b/drivers/net/wireless/cw1200/scan.h new file mode 100644 index 000000000000..5a8296ccfa82 --- /dev/null +++ b/drivers/net/wireless/cw1200/scan.h @@ -0,0 +1,56 @@ +/* + * Scan interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef SCAN_H_INCLUDED +#define SCAN_H_INCLUDED + +#include +#include "wsm.h" + +/* external */ struct sk_buff; +/* external */ struct cfg80211_scan_request; +/* external */ struct ieee80211_channel; +/* external */ struct ieee80211_hw; +/* external */ struct work_struct; + +struct cw1200_scan { + struct semaphore lock; + struct work_struct work; + struct delayed_work timeout; + struct cfg80211_scan_request *req; + struct ieee80211_channel **begin; + struct ieee80211_channel **curr; + struct ieee80211_channel **end; + struct wsm_ssid ssids[WSM_SCAN_MAX_NUM_OF_SSIDS]; + int output_power; + int n_ssids; + int status; + atomic_t in_progress; + /* Direct probe requests workaround */ + struct delayed_work probe_work; + int direct_probe; +}; + +int cw1200_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); +void cw1200_scan_work(struct work_struct *work); +void cw1200_scan_timeout(struct work_struct *work); +void cw1200_clear_recent_scan_work(struct work_struct *work); +void cw1200_scan_complete_cb(struct cw1200_common *priv, + struct wsm_scan_complete *arg); +void cw1200_scan_failed_cb(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* Raw probe requests TX workaround */ +void cw1200_probe_work(struct work_struct *work); + +#endif diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c new file mode 100644 index 000000000000..679c55f15c67 --- /dev/null +++ b/drivers/net/wireless/cw1200/sta.c @@ -0,0 +1,2406 @@ +/* + * Mac80211 STA API for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include "cw1200.h" +#include "sta.h" +#include "fwio.h" +#include "bh.h" +#include "debug.h" + +#ifndef ERP_INFO_BYTE_OFFSET +#define ERP_INFO_BYTE_OFFSET 2 +#endif + +static void cw1200_do_join(struct cw1200_common *priv); +static void cw1200_do_unjoin(struct cw1200_common *priv); + +static int cw1200_upload_beacon(struct cw1200_common *priv); +static int cw1200_upload_pspoll(struct cw1200_common *priv); +static int cw1200_upload_null(struct cw1200_common *priv); +static int cw1200_upload_qosnull(struct cw1200_common *priv); +static int cw1200_start_ap(struct cw1200_common *priv); +static int cw1200_update_beaconing(struct cw1200_common *priv); +static int cw1200_enable_beaconing(struct cw1200_common *priv, + bool enable); +static void __cw1200_sta_notify(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + int link_id); +static int __cw1200_flush(struct cw1200_common *priv, bool drop); + +static inline void __cw1200_free_event_queue(struct list_head *list) +{ + struct cw1200_wsm_event *event, *tmp; + list_for_each_entry_safe(event, tmp, list, link) { + list_del(&event->link); + kfree(event); + } +} + +/* ******************************************************************** */ +/* STA API */ + +int cw1200_start(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + int ret = 0; + + cw1200_pm_stay_awake(&priv->pm_state, HZ); + + mutex_lock(&priv->conf_mutex); + + /* default EDCA */ + WSM_EDCA_SET(&priv->edca, 0, 0x0002, 0x0003, 0x0007, 47, 0xc8, false); + WSM_EDCA_SET(&priv->edca, 1, 0x0002, 0x0007, 0x000f, 94, 0xc8, false); + WSM_EDCA_SET(&priv->edca, 2, 0x0003, 0x000f, 0x03ff, 0, 0xc8, false); + WSM_EDCA_SET(&priv->edca, 3, 0x0007, 0x000f, 0x03ff, 0, 0xc8, false); + ret = wsm_set_edca_params(priv, &priv->edca); + if (ret) + goto out; + + ret = cw1200_set_uapsd_param(priv, &priv->edca); + if (ret) + goto out; + + priv->setbssparams_done = false; + + memcpy(priv->mac_addr, dev->wiphy->perm_addr, ETH_ALEN); + priv->mode = NL80211_IFTYPE_MONITOR; + priv->wep_default_key_id = -1; + + priv->cqm_beacon_loss_count = 10; + + ret = cw1200_setup_mac(priv); + if (ret) + goto out; + +out: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +void cw1200_stop(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + LIST_HEAD(list); + int i; + + wsm_lock_tx(priv); + + while (down_trylock(&priv->scan.lock)) { + /* Scan is in progress. Force it to stop. */ + priv->scan.req = NULL; + schedule(); + } + up(&priv->scan.lock); + + cancel_delayed_work_sync(&priv->scan.probe_work); + cancel_delayed_work_sync(&priv->scan.timeout); + cancel_delayed_work_sync(&priv->clear_recent_scan_work); + cancel_delayed_work_sync(&priv->join_timeout); + cw1200_cqm_bssloss_sm(priv, 0, 0, 0); + cancel_work_sync(&priv->unjoin_work); + cancel_delayed_work_sync(&priv->link_id_gc_work); + flush_workqueue(priv->workqueue); + del_timer_sync(&priv->mcast_timeout); + mutex_lock(&priv->conf_mutex); + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + priv->listening = false; + + spin_lock(&priv->event_queue_lock); + list_splice_init(&priv->event_queue, &list); + spin_unlock(&priv->event_queue_lock); + __cw1200_free_event_queue(&list); + + + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + priv->join_pending = false; + + for (i = 0; i < 4; i++) + cw1200_queue_clear(&priv->tx_queue[i]); + mutex_unlock(&priv->conf_mutex); + tx_policy_clean(priv); + + /* HACK! 
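The default EDCA programming in cw1200_start() is easier to read once the macro arguments are decoded; the order (queue, aifs, cwMin, cwMax, txop, ..., uapsd) can be inferred from how cw1200_conf_tx() later feeds mac80211's per-queue parameters into the same macro. The same defaults as a table:

/* The same defaults as a table; field names follow the macro argument
 * order, and the fixed 0xc8 argument (assumed to be a frame lifetime)
 * is omitted.
 */
static const struct {
        u8  queue;      /* 0 VO, 1 VI, 2 BE, 3 BK */
        u16 aifs, cw_min, cw_max, txop;
} example_edca_defaults[] = {
        { 0, 2, 0x0003, 0x0007, 47 },   /* voice       */
        { 1, 2, 0x0007, 0x000f, 94 },   /* video       */
        { 2, 3, 0x000f, 0x03ff,  0 },   /* best effort */
        { 3, 7, 0x000f, 0x03ff,  0 },   /* background  */
};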
*/ + if (atomic_xchg(&priv->tx_lock, 1) != 1) + pr_debug("[STA] TX is force-unlocked due to stop request.\n"); + + wsm_unlock_tx(priv); + atomic_xchg(&priv->tx_lock, 0); /* for recovery to work */ +} + +static int cw1200_bssloss_mitigation = 1; +module_param(cw1200_bssloss_mitigation, int, 0644); +MODULE_PARM_DESC(cw1200_bssloss_mitigation, "BSS Loss mitigation. 0 == disabled, 1 == enabled (default)"); + + +void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, + int init, int good, int bad) +{ + int tx = 0; + + priv->delayed_link_loss = 0; + cancel_work_sync(&priv->bss_params_work); + + pr_debug("[STA] CQM BSSLOSS_SM: state: %d init %d good %d bad: %d txlock: %d uj: %d\n", + priv->bss_loss_state, + init, good, bad, + atomic_read(&priv->tx_lock), + priv->delayed_unjoin); + + /* If we have a pending unjoin */ + if (priv->delayed_unjoin) + return; + + if (init) { + queue_delayed_work(priv->workqueue, + &priv->bss_loss_work, + HZ); + priv->bss_loss_state = 0; + + /* Skip the confimration procedure in P2P case */ + if (!priv->vif->p2p && !atomic_read(&priv->tx_lock)) + tx = 1; + } else if (good) { + cancel_delayed_work_sync(&priv->bss_loss_work); + priv->bss_loss_state = 0; + queue_work(priv->workqueue, &priv->bss_params_work); + } else if (bad) { + /* XXX Should we just keep going until we time out? */ + if (priv->bss_loss_state < 3) + tx = 1; + } else { + cancel_delayed_work_sync(&priv->bss_loss_work); + priv->bss_loss_state = 0; + } + + /* Bypass mitigation if it's disabled */ + if (!cw1200_bssloss_mitigation) + tx = 0; + + /* Spit out a NULL packet to our AP if necessary */ + if (tx) { + struct sk_buff *skb; + + priv->bss_loss_state++; + + skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + WARN_ON(!skb); + if (skb) + cw1200_tx(priv->hw, NULL, skb); + } +} + +int cw1200_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + int ret; + struct cw1200_common *priv = dev->priv; + /* __le32 auto_calibration_mode = __cpu_to_le32(1); */ + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + + mutex_lock(&priv->conf_mutex); + + if (priv->mode != NL80211_IFTYPE_MONITOR) { + mutex_unlock(&priv->conf_mutex); + return -EOPNOTSUPP; + } + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + priv->mode = vif->type; + break; + default: + mutex_unlock(&priv->conf_mutex); + return -EOPNOTSUPP; + } + + priv->vif = vif; + memcpy(priv->mac_addr, vif->addr, ETH_ALEN); + ret = cw1200_setup_mac(priv); + /* Enable auto-calibration */ + /* Exception in subsequent channel switch; disabled. 
+ * wsm_write_mib(priv, WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE, + * &auto_calibration_mode, sizeof(auto_calibration_mode)); + */ + + mutex_unlock(&priv->conf_mutex); + return ret; +} + +void cw1200_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct cw1200_common *priv = dev->priv; + struct wsm_reset reset = { + .reset_statistics = true, + }; + int i; + + mutex_lock(&priv->conf_mutex); + switch (priv->join_status) { + case CW1200_JOIN_STATUS_JOINING: + case CW1200_JOIN_STATUS_PRE_STA: + case CW1200_JOIN_STATUS_STA: + case CW1200_JOIN_STATUS_IBSS: + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + break; + case CW1200_JOIN_STATUS_AP: + for (i = 0; priv->link_id_map; ++i) { + if (priv->link_id_map & BIT(i)) { + reset.link_id = i; + wsm_reset(priv, &reset); + priv->link_id_map &= ~BIT(i); + } + } + memset(priv->link_id_db, 0, sizeof(priv->link_id_db)); + priv->sta_asleep_mask = 0; + priv->enable_beacon = false; + priv->tx_multicast = false; + priv->aid0_bit_set = false; + priv->buffered_multicasts = false; + priv->pspoll_mask = 0; + reset.link_id = 0; + wsm_reset(priv, &reset); + break; + case CW1200_JOIN_STATUS_MONITOR: + cw1200_update_listening(priv, false); + break; + default: + break; + } + priv->vif = NULL; + priv->mode = NL80211_IFTYPE_MONITOR; + memset(priv->mac_addr, 0, ETH_ALEN); + memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo)); + cw1200_free_keys(priv); + cw1200_setup_mac(priv); + priv->listening = false; + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + if (!__cw1200_flush(priv, true)) + wsm_unlock_tx(priv); + + mutex_unlock(&priv->conf_mutex); +} + +int cw1200_change_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, + bool p2p) +{ + int ret = 0; + pr_debug("change_interface new: %d (%d), old: %d (%d)\n", new_type, + p2p, vif->type, vif->p2p); + + if (new_type != vif->type || vif->p2p != p2p) { + cw1200_remove_interface(dev, vif); + vif->type = new_type; + vif->p2p = p2p; + ret = cw1200_add_interface(dev, vif); + } + + return ret; +} + +int cw1200_config(struct ieee80211_hw *dev, u32 changed) +{ + int ret = 0; + struct cw1200_common *priv = dev->priv; + struct ieee80211_conf *conf = &dev->conf; + + pr_debug("CONFIG CHANGED: %08x\n", changed); + + down(&priv->scan.lock); + mutex_lock(&priv->conf_mutex); + /* TODO: IEEE80211_CONF_CHANGE_QOS */ + /* TODO: IEEE80211_CONF_CHANGE_LISTEN_INTERVAL */ + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + priv->output_power = conf->power_level; + pr_debug("[STA] TX power: %d\n", priv->output_power); + wsm_set_output_power(priv, priv->output_power * 10); + } + + if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) && + (priv->channel != conf->chandef.chan)) { + struct ieee80211_channel *ch = conf->chandef.chan; + struct wsm_switch_channel channel = { + .channel_number = ch->hw_value, + }; + pr_debug("[STA] Freq %d (wsm ch: %d).\n", + ch->center_freq, ch->hw_value); + + /* __cw1200_flush() implicitly locks tx, if successful */ + if (!__cw1200_flush(priv, false)) { + if (!wsm_switch_channel(priv, &channel)) { + ret = wait_event_timeout(priv->channel_switch_done, + !priv->channel_switch_in_progress, + 3 * HZ); + if (ret) { + /* Already unlocks if successful */ + priv->channel = ch; + ret = 0; + } else { + ret = -ETIMEDOUT; + } + } else { + /* Unlock if switch channel fails */ + wsm_unlock_tx(priv); + } + } + } + + if (changed & IEEE80211_CONF_CHANGE_PS) { + if (!(conf->flags & IEEE80211_CONF_PS)) + 
priv->powersave_mode.mode = WSM_PSM_ACTIVE; + else if (conf->dynamic_ps_timeout <= 0) + priv->powersave_mode.mode = WSM_PSM_PS; + else + priv->powersave_mode.mode = WSM_PSM_FAST_PS; + + /* Firmware requires that value for this 1-byte field must + * be specified in units of 500us. Values above the 128ms + * threshold are not supported. + */ + if (conf->dynamic_ps_timeout >= 0x80) + priv->powersave_mode.fast_psm_idle_period = 0xFF; + else + priv->powersave_mode.fast_psm_idle_period = + conf->dynamic_ps_timeout << 1; + + if (priv->join_status == CW1200_JOIN_STATUS_STA && + priv->bss_params.aid) + cw1200_set_pm(priv, &priv->powersave_mode); + } + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + /* TBD: It looks like it's transparent + * there's a monitor interface present -- use this + * to determine for example whether to calculate + * timestamps for packets or not, do not use instead + * of filter flags! + */ + } + + if (changed & IEEE80211_CONF_CHANGE_IDLE) { + struct wsm_operational_mode mode = { + .power_mode = cw1200_power_mode, + .disable_more_flag_usage = true, + }; + + wsm_lock_tx(priv); + /* Disable p2p-dev mode forced by TX request */ + if ((priv->join_status == CW1200_JOIN_STATUS_MONITOR) && + (conf->flags & IEEE80211_CONF_IDLE) && + !priv->listening) { + cw1200_disable_listening(priv); + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + } + wsm_set_operational_mode(priv, &mode); + wsm_unlock_tx(priv); + } + + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { + pr_debug("[STA] Retry limits: %d (long), %d (short).\n", + conf->long_frame_max_tx_count, + conf->short_frame_max_tx_count); + spin_lock_bh(&priv->tx_policy_cache.lock); + priv->long_frame_max_tx_count = conf->long_frame_max_tx_count; + priv->short_frame_max_tx_count = + (conf->short_frame_max_tx_count < 0x0F) ? 
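A worked form of the fast-PS conversion above: dynamic_ps_timeout is in milliseconds, while fast_psm_idle_period is an 8-bit count of 500 us units, so the value is doubled and clamped at the 128 ms limit.

/* Worked form of the conversion: milliseconds in, 500 us units out. */
static u8 example_fast_psm_idle_period(int dynamic_ps_timeout_ms)
{
        if (dynamic_ps_timeout_ms >= 0x80)      /* >= 128 ms: clamp */
                return 0xFF;
        return dynamic_ps_timeout_ms << 1;      /* 100 ms -> 200 units */
}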
+ conf->short_frame_max_tx_count : 0x0F; + priv->hw->max_rate_tries = priv->short_frame_max_tx_count; + spin_unlock_bh(&priv->tx_policy_cache.lock); + } + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); + return ret; +} + +void cw1200_update_filtering(struct cw1200_common *priv) +{ + int ret; + bool bssid_filtering = !priv->rx_filter.bssid; + bool is_p2p = priv->vif && priv->vif->p2p; + bool is_sta = priv->vif && NL80211_IFTYPE_STATION == priv->vif->type; + + static struct wsm_beacon_filter_control bf_ctrl; + static struct wsm_mib_beacon_filter_table bf_tbl = { + .entry[0].ie_id = WLAN_EID_VENDOR_SPECIFIC, + .entry[0].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED | + WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT | + WSM_BEACON_FILTER_IE_HAS_APPEARED, + .entry[0].oui[0] = 0x50, + .entry[0].oui[1] = 0x6F, + .entry[0].oui[2] = 0x9A, + .entry[1].ie_id = WLAN_EID_HT_OPERATION, + .entry[1].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED | + WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT | + WSM_BEACON_FILTER_IE_HAS_APPEARED, + .entry[2].ie_id = WLAN_EID_ERP_INFO, + .entry[2].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED | + WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT | + WSM_BEACON_FILTER_IE_HAS_APPEARED, + }; + + if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) + return; + else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) + bssid_filtering = false; + + if (priv->disable_beacon_filter) { + bf_ctrl.enabled = 0; + bf_ctrl.bcn_count = 1; + bf_tbl.num = __cpu_to_le32(0); + } else if (is_p2p || !is_sta) { + bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE | + WSM_BEACON_FILTER_AUTO_ERP; + bf_ctrl.bcn_count = 0; + bf_tbl.num = __cpu_to_le32(2); + } else { + bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE; + bf_ctrl.bcn_count = 0; + bf_tbl.num = __cpu_to_le32(3); + } + + /* + * When acting as p2p client being connected to p2p GO, in order to + * receive frames from a different p2p device, turn off bssid filter. + * + * WARNING: FW dependency! + * This can only be used with FW WSM371 and its successors. + * In that FW version even with bssid filter turned off, + * device will block most of the unwanted frames. + */ + if (is_p2p) + bssid_filtering = false; + + ret = wsm_set_rx_filter(priv, &priv->rx_filter); + if (!ret) + ret = wsm_set_beacon_filter_table(priv, &bf_tbl); + if (!ret) + ret = wsm_beacon_filter_control(priv, &bf_ctrl); + if (!ret) + ret = wsm_set_bssid_filtering(priv, bssid_filtering); + if (!ret) + ret = wsm_set_multicast_filter(priv, &priv->multicast_filter); + if (ret) + wiphy_err(priv->hw->wiphy, + "Update filtering failed: %d.\n", ret); + return; +} + +void cw1200_update_filtering_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, + update_filtering_work); + + cw1200_update_filtering(priv); +} + +void cw1200_set_beacon_wakeup_period_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, + set_beacon_wakeup_period_work); + + wsm_set_beacon_wakeup_period(priv, + priv->beacon_int * priv->join_dtim_period > + MAX_BEACON_SKIP_TIME_MS ? 
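The beacon-wakeup decision in cw1200_set_beacon_wakeup_period_work() trades power for latency: sleep across one DTIM period of beacons unless that would exceed MAX_BEACON_SKIP_TIME_MS. A worked restatement (beacon_int is in TU, treated as milliseconds for the comparison, which is an approximation assumed here):

/* Worked restatement of the skip-count decision. */
static unsigned int example_beacon_wakeup_period(unsigned int beacon_int,
                                                 unsigned int dtim_period,
                                                 unsigned int max_skip_ms)
{
        /* e.g. beacon_int 100, DTIM 3, limit 500 -> wake every 3rd
         * beacon; DTIM 10 (1000 > 500) -> wake every beacon.
         */
        if (beacon_int * dtim_period > max_skip_ms)
                return 1;
        return dtim_period;
}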
1 : + priv->join_dtim_period, 0); +} + +u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) +{ + static u8 broadcast_ipv6[ETH_ALEN] = { + 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 + }; + static u8 broadcast_ipv4[ETH_ALEN] = { + 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 + }; + struct cw1200_common *priv = hw->priv; + struct netdev_hw_addr *ha; + int count = 0; + + /* Disable multicast filtering */ + priv->has_multicast_subscription = false; + memset(&priv->multicast_filter, 0x00, sizeof(priv->multicast_filter)); + + if (netdev_hw_addr_list_count(mc_list) > WSM_MAX_GRP_ADDRTABLE_ENTRIES) + return 0; + + /* Enable if requested */ + netdev_hw_addr_list_for_each(ha, mc_list) { + pr_debug("[STA] multicast: %pM\n", ha->addr); + memcpy(&priv->multicast_filter.macaddrs[count], + ha->addr, ETH_ALEN); + if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) && + memcmp(ha->addr, broadcast_ipv6, ETH_ALEN)) + priv->has_multicast_subscription = true; + count++; + } + + if (count) { + priv->multicast_filter.enable = __cpu_to_le32(1); + priv->multicast_filter.num_addrs = __cpu_to_le32(count); + } + + return netdev_hw_addr_list_count(mc_list); +} + +void cw1200_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct cw1200_common *priv = dev->priv; + bool listening = !!(*total_flags & + (FIF_PROMISC_IN_BSS | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC | + FIF_PROBE_REQ)); + + *total_flags &= FIF_PROMISC_IN_BSS | + FIF_OTHER_BSS | + FIF_FCSFAIL | + FIF_BCN_PRBRESP_PROMISC | + FIF_PROBE_REQ; + + down(&priv->scan.lock); + mutex_lock(&priv->conf_mutex); + + priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS) + ? 1 : 0; + priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS | + FIF_PROBE_REQ)) ? 1 : 0; + priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 
1 : 0; + priv->disable_beacon_filter = !(*total_flags & + (FIF_BCN_PRBRESP_PROMISC | + FIF_PROMISC_IN_BSS | + FIF_PROBE_REQ)); + if (priv->listening != listening) { + priv->listening = listening; + wsm_lock_tx(priv); + cw1200_update_listening(priv, listening); + wsm_unlock_tx(priv); + } + cw1200_update_filtering(priv); + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); +} + +int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params) +{ + struct cw1200_common *priv = dev->priv; + int ret = 0; + /* To prevent re-applying PM request OID again and again*/ + bool old_uapsd_flags; + + mutex_lock(&priv->conf_mutex); + + if (queue < dev->queues) { + old_uapsd_flags = priv->uapsd_info.uapsd_flags; + + WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0); + ret = wsm_set_tx_queue_params(priv, + &priv->tx_queue_params.params[queue], queue); + if (ret) { + ret = -EINVAL; + goto out; + } + + WSM_EDCA_SET(&priv->edca, queue, params->aifs, + params->cw_min, params->cw_max, + params->txop, 0xc8, + params->uapsd); + ret = wsm_set_edca_params(priv, &priv->edca); + if (ret) { + ret = -EINVAL; + goto out; + } + + if (priv->mode == NL80211_IFTYPE_STATION) { + ret = cw1200_set_uapsd_param(priv, &priv->edca); + if (!ret && priv->setbssparams_done && + (priv->join_status == CW1200_JOIN_STATUS_STA) && + (old_uapsd_flags != priv->uapsd_info.uapsd_flags)) + ret = cw1200_set_pm(priv, &priv->powersave_mode); + } + } else { + ret = -EINVAL; + } + +out: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +int cw1200_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats) +{ + struct cw1200_common *priv = dev->priv; + + memcpy(stats, &priv->stats, sizeof(*stats)); + return 0; +} + +int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg) +{ + struct wsm_set_pm pm = *arg; + + if (priv->uapsd_info.uapsd_flags != 0) + pm.mode &= ~WSM_PSM_FAST_PS_FLAG; + + if (memcmp(&pm, &priv->firmware_ps_mode, + sizeof(struct wsm_set_pm))) { + priv->firmware_ps_mode = pm; + return wsm_set_pm(priv, &pm); + } else { + return 0; + } +} + +int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + int ret = -EOPNOTSUPP; + struct cw1200_common *priv = dev->priv; + struct ieee80211_key_seq seq; + + mutex_lock(&priv->conf_mutex); + + if (cmd == SET_KEY) { + u8 *peer_addr = NULL; + int pairwise = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) ? 
+ 1 : 0; + int idx = cw1200_alloc_key(priv); + struct wsm_add_key *wsm_key = &priv->keys[idx]; + + if (idx < 0) { + ret = -EINVAL; + goto finally; + } + + if (sta) + peer_addr = sta->addr; + + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + if (key->keylen > 16) { + cw1200_free_key(priv, idx); + ret = -EINVAL; + goto finally; + } + + if (pairwise) { + wsm_key->type = WSM_KEY_TYPE_WEP_PAIRWISE; + memcpy(wsm_key->wep_pairwise.peer, + peer_addr, ETH_ALEN); + memcpy(wsm_key->wep_pairwise.keydata, + &key->key[0], key->keylen); + wsm_key->wep_pairwise.keylen = key->keylen; + } else { + wsm_key->type = WSM_KEY_TYPE_WEP_DEFAULT; + memcpy(wsm_key->wep_group.keydata, + &key->key[0], key->keylen); + wsm_key->wep_group.keylen = key->keylen; + wsm_key->wep_group.keyid = key->keyidx; + } + break; + case WLAN_CIPHER_SUITE_TKIP: + ieee80211_get_key_rx_seq(key, 0, &seq); + if (pairwise) { + wsm_key->type = WSM_KEY_TYPE_TKIP_PAIRWISE; + memcpy(wsm_key->tkip_pairwise.peer, + peer_addr, ETH_ALEN); + memcpy(wsm_key->tkip_pairwise.keydata, + &key->key[0], 16); + memcpy(wsm_key->tkip_pairwise.tx_mic_key, + &key->key[16], 8); + memcpy(wsm_key->tkip_pairwise.rx_mic_key, + &key->key[24], 8); + } else { + size_t mic_offset = + (priv->mode == NL80211_IFTYPE_AP) ? + 16 : 24; + wsm_key->type = WSM_KEY_TYPE_TKIP_GROUP; + memcpy(wsm_key->tkip_group.keydata, + &key->key[0], 16); + memcpy(wsm_key->tkip_group.rx_mic_key, + &key->key[mic_offset], 8); + + wsm_key->tkip_group.rx_seqnum[0] = seq.tkip.iv16 & 0xff; + wsm_key->tkip_group.rx_seqnum[1] = (seq.tkip.iv16 >> 8) & 0xff; + wsm_key->tkip_group.rx_seqnum[2] = seq.tkip.iv32 & 0xff; + wsm_key->tkip_group.rx_seqnum[3] = (seq.tkip.iv32 >> 8) & 0xff; + wsm_key->tkip_group.rx_seqnum[4] = (seq.tkip.iv32 >> 16) & 0xff; + wsm_key->tkip_group.rx_seqnum[5] = (seq.tkip.iv32 >> 24) & 0xff; + wsm_key->tkip_group.rx_seqnum[6] = 0; + wsm_key->tkip_group.rx_seqnum[7] = 0; + + wsm_key->tkip_group.keyid = key->keyidx; + } + break; + case WLAN_CIPHER_SUITE_CCMP: + ieee80211_get_key_rx_seq(key, 0, &seq); + if (pairwise) { + wsm_key->type = WSM_KEY_TYPE_AES_PAIRWISE; + memcpy(wsm_key->aes_pairwise.peer, + peer_addr, ETH_ALEN); + memcpy(wsm_key->aes_pairwise.keydata, + &key->key[0], 16); + } else { + wsm_key->type = WSM_KEY_TYPE_AES_GROUP; + memcpy(wsm_key->aes_group.keydata, + &key->key[0], 16); + + wsm_key->aes_group.rx_seqnum[0] = seq.ccmp.pn[5]; + wsm_key->aes_group.rx_seqnum[1] = seq.ccmp.pn[4]; + wsm_key->aes_group.rx_seqnum[2] = seq.ccmp.pn[3]; + wsm_key->aes_group.rx_seqnum[3] = seq.ccmp.pn[2]; + wsm_key->aes_group.rx_seqnum[4] = seq.ccmp.pn[1]; + wsm_key->aes_group.rx_seqnum[5] = seq.ccmp.pn[0]; + wsm_key->aes_group.rx_seqnum[6] = 0; + wsm_key->aes_group.rx_seqnum[7] = 0; + wsm_key->aes_group.keyid = key->keyidx; + } + break; + case WLAN_CIPHER_SUITE_SMS4: + if (pairwise) { + wsm_key->type = WSM_KEY_TYPE_WAPI_PAIRWISE; + memcpy(wsm_key->wapi_pairwise.peer, + peer_addr, ETH_ALEN); + memcpy(wsm_key->wapi_pairwise.keydata, + &key->key[0], 16); + memcpy(wsm_key->wapi_pairwise.mic_key, + &key->key[16], 16); + wsm_key->wapi_pairwise.keyid = key->keyidx; + } else { + wsm_key->type = WSM_KEY_TYPE_WAPI_GROUP; + memcpy(wsm_key->wapi_group.keydata, + &key->key[0], 16); + memcpy(wsm_key->wapi_group.mic_key, + &key->key[16], 16); + wsm_key->wapi_group.keyid = key->keyidx; + } + break; + default: + pr_warn("Unhandled key type %d\n", key->cipher); + cw1200_free_key(priv, idx); + ret = -EOPNOTSUPP; + goto 
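The receive-sequence programming above reverses byte order: mac80211 hands over the CCMP packet number most-significant byte first (pn[0] = MSB), while the firmware field is filled least-significant byte first and padded to eight bytes. A compact restatement of just that conversion:

/* Sketch of the byte-order conversion used to seed the firmware replay
 * counter from mac80211's CCMP PN (pn[0] = most significant byte).
 */
static void example_ccmp_pn_to_rx_seqnum(u8 rx_seqnum[8], const u8 pn[6])
{
        int i;

        for (i = 0; i < 6; i++)
                rx_seqnum[i] = pn[5 - i];       /* LSB first for firmware */
        rx_seqnum[6] = 0;
        rx_seqnum[7] = 0;
}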
finally; + } + ret = wsm_add_key(priv, wsm_key); + if (!ret) + key->hw_key_idx = idx; + else + cw1200_free_key(priv, idx); + } else if (cmd == DISABLE_KEY) { + struct wsm_remove_key wsm_key = { + .index = key->hw_key_idx, + }; + + if (wsm_key.index > WSM_KEY_MAX_INDEX) { + ret = -EINVAL; + goto finally; + } + + cw1200_free_key(priv, wsm_key.index); + ret = wsm_remove_key(priv, &wsm_key); + } else { + pr_warn("Unhandled key command %d\n", cmd); + } + +finally: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +void cw1200_wep_key_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, wep_key_work); + u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id); + struct cw1200_queue *queue = &priv->tx_queue[queue_id]; + __le32 wep_default_key_id = __cpu_to_le32( + priv->wep_default_key_id); + + pr_debug("[STA] Setting default WEP key: %d\n", + priv->wep_default_key_id); + wsm_flush_tx(priv); + wsm_write_mib(priv, WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID, + &wep_default_key_id, sizeof(wep_default_key_id)); + cw1200_queue_requeue(queue, priv->pending_frame_id); + wsm_unlock_tx(priv); +} + +int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + int ret = 0; + __le32 val32; + struct cw1200_common *priv = hw->priv; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + return 0; + + if (value != (u32) -1) + val32 = __cpu_to_le32(value); + else + val32 = 0; /* disabled */ + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* device is down, can _not_ set threshold */ + ret = -ENODEV; + goto out; + } + + if (priv->rts_threshold == value) + goto out; + + pr_debug("[STA] Setting RTS threshold: %d\n", + priv->rts_threshold); + + /* mutex_lock(&priv->conf_mutex); */ + ret = wsm_write_mib(priv, WSM_MIB_ID_DOT11_RTS_THRESHOLD, + &val32, sizeof(val32)); + if (!ret) + priv->rts_threshold = value; + /* mutex_unlock(&priv->conf_mutex); */ + +out: + return ret; +} + +/* If successful, LOCKS the TX queue! */ +static int __cw1200_flush(struct cw1200_common *priv, bool drop) +{ + int i, ret; + + for (;;) { + /* TODO: correct flush handling is required when dev_stop. + * Temporary workaround: 2s + */ + if (drop) { + for (i = 0; i < 4; ++i) + cw1200_queue_clear(&priv->tx_queue[i]); + } else { + ret = wait_event_timeout( + priv->tx_queue_stats.wait_link_id_empty, + cw1200_queue_stats_is_empty( + &priv->tx_queue_stats, -1), + 2 * HZ); + } + + if (!drop && ret <= 0) { + ret = -ETIMEDOUT; + break; + } else { + ret = 0; + } + + wsm_lock_tx(priv); + if (!cw1200_queue_stats_is_empty(&priv->tx_queue_stats, -1)) { + /* Highly unlikely: WSM requeued frames. 
*/ + wsm_unlock_tx(priv); + continue; + } + break; + } + return ret; +} + +void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +{ + struct cw1200_common *priv = hw->priv; + + switch (priv->mode) { + case NL80211_IFTYPE_MONITOR: + drop = true; + break; + case NL80211_IFTYPE_AP: + if (!priv->enable_beacon) + drop = true; + break; + } + + if (!__cw1200_flush(priv, drop)) + wsm_unlock_tx(priv); + + return; +} + +/* ******************************************************************** */ +/* WSM callbacks */ + +void cw1200_free_event_queue(struct cw1200_common *priv) +{ + LIST_HEAD(list); + + spin_lock(&priv->event_queue_lock); + list_splice_init(&priv->event_queue, &list); + spin_unlock(&priv->event_queue_lock); + + __cw1200_free_event_queue(&list); +} + +void cw1200_event_handler(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, event_handler); + struct cw1200_wsm_event *event; + LIST_HEAD(list); + + spin_lock(&priv->event_queue_lock); + list_splice_init(&priv->event_queue, &list); + spin_unlock(&priv->event_queue_lock); + + list_for_each_entry(event, &list, link) { + switch (event->evt.id) { + case WSM_EVENT_ERROR: + pr_err("Unhandled WSM Error from LMAC\n"); + break; + case WSM_EVENT_BSS_LOST: + pr_debug("[CQM] BSS lost.\n"); + cancel_work_sync(&priv->unjoin_work); + if (!down_trylock(&priv->scan.lock)) { + cw1200_cqm_bssloss_sm(priv, 1, 0, 0); + up(&priv->scan.lock); + } else { + /* Scan is in progress. Delay reporting. + * Scan complete will trigger bss_loss_work + */ + priv->delayed_link_loss = 1; + /* Also start a watchdog. */ + queue_delayed_work(priv->workqueue, + &priv->bss_loss_work, 5*HZ); + } + break; + case WSM_EVENT_BSS_REGAINED: + pr_debug("[CQM] BSS regained.\n"); + cw1200_cqm_bssloss_sm(priv, 0, 0, 0); + cancel_work_sync(&priv->unjoin_work); + break; + case WSM_EVENT_RADAR_DETECTED: + wiphy_info(priv->hw->wiphy, "radar pulse detected\n"); + break; + case WSM_EVENT_RCPI_RSSI: + { + /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 + * RSSI = RCPI / 2 - 110 + */ + int rcpiRssi = (int)(event->evt.data & 0xFF); + int cqm_evt; + if (priv->cqm_use_rssi) + rcpiRssi = (s8)rcpiRssi; + else + rcpiRssi = rcpiRssi / 2 - 110; + + cqm_evt = (rcpiRssi <= priv->cqm_rssi_thold) ? 
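A worked form of the RCPI conversion above: RCPI is an unsigned Q7.1 quantity (0.5 dB steps), so the dBm value is RCPI / 2 - 110.

/* Worked example: RCPI 80 -> 80 / 2 - 110 = -70 dBm, RCPI 40 -> -90 dBm. */
static int example_rcpi_to_dbm(u8 rcpi)
{
        return rcpi / 2 - 110;
}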
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW : + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; + pr_debug("[CQM] RSSI event: %d.\n", rcpiRssi); + ieee80211_cqm_rssi_notify(priv->vif, cqm_evt, + GFP_KERNEL); + break; + } + case WSM_EVENT_BT_INACTIVE: + pr_warn("Unhandled BT INACTIVE from LMAC\n"); + break; + case WSM_EVENT_BT_ACTIVE: + pr_warn("Unhandled BT ACTIVE from LMAC\n"); + break; + } + } + __cw1200_free_event_queue(&list); +} + +void cw1200_bss_loss_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, bss_loss_work.work); + + pr_debug("[CQM] Reporting connection loss.\n"); + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); +} + +void cw1200_bss_params_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, bss_params_work); + mutex_lock(&priv->conf_mutex); + + priv->bss_params.reset_beacon_loss = 1; + wsm_set_bss_params(priv, &priv->bss_params); + priv->bss_params.reset_beacon_loss = 0; + + mutex_unlock(&priv->conf_mutex); +} + +/* ******************************************************************** */ +/* Internal API */ + +/* + * This function is called to Parse the SDD file + * to extract listen_interval and PTA related information + * sdd is a TLV: u8 id, u8 len, u8 data[] + */ +static int cw1200_parse_sdd_file(struct cw1200_common *priv) +{ + const u8 *p = priv->sdd->data; + int ret = 0; + + while (p + 2 <= priv->sdd->data + priv->sdd->size) { + if (p + p[1] + 2 > priv->sdd->data + priv->sdd->size) { + pr_warn("Malformed sdd structure\n"); + return -1; + } + switch (p[0]) { + case SDD_PTA_CFG_ELT_ID: { + u16 v; + if (p[1] < 4) { + pr_warn("SDD_PTA_CFG_ELT_ID malformed\n"); + ret = -1; + break; + } + v = le16_to_cpu(*((u16 *)(p + 2))); + if (!v) /* non-zero means this is enabled */ + break; + + v = le16_to_cpu(*((u16 *)(p + 4))); + priv->conf_listen_interval = (v >> 7) & 0x1F; + pr_debug("PTA found; Listen Interval %d\n", + priv->conf_listen_interval); + break; + } + case SDD_REFERENCE_FREQUENCY_ELT_ID: { + u16 clk = le16_to_cpu(*((u16 *)(p + 2))); + if (clk != priv->hw_refclk) + pr_warn("SDD file doesn't match configured refclk (%d vs %d)\n", + clk, priv->hw_refclk); + break; + } + default: + break; + } + p += p[1] + 2; + } + + if (!priv->bt_present) { + pr_debug("PTA element NOT found.\n"); + priv->conf_listen_interval = 0; + } + return ret; +} + +int cw1200_setup_mac(struct cw1200_common *priv) +{ + int ret = 0; + + /* NOTE: There is a bug in FW: it reports signal + * as RSSI if RSSI subscription is enabled. + * It's not enough to set WSM_RCPI_RSSI_USE_RSSI. 
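cw1200_parse_sdd_file() walks a flat id/length/value stream. A stripped-down walker showing only the TLV framing (element-specific handling elided):

/* Sketch of the SDD TLV framing: each element is u8 id, u8 len,
 * u8 data[len]; element-specific handling elided.
 */
static void example_walk_sdd(const u8 *data, size_t size)
{
        const u8 *p = data;

        while (p + 2 <= data + size) {
                u8 id  = p[0];
                u8 len = p[1];

                if (p + 2 + len > data + size)
                        break;                  /* malformed element */

                pr_debug("SDD element 0x%02x, %d bytes\n", id, len);
                p += 2 + len;                   /* next element */
        }
}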
+ * + * NOTE2: RSSI based reports have been switched to RCPI, since + * FW has a bug and RSSI reported values are not stable, + * what can leads to signal level oscilations in user-end applications + */ + struct wsm_rcpi_rssi_threshold threshold = { + .rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE | + WSM_RCPI_RSSI_DONT_USE_UPPER | + WSM_RCPI_RSSI_DONT_USE_LOWER, + .rollingAverageCount = 16, + }; + + struct wsm_configuration cfg = { + .dot11StationId = &priv->mac_addr[0], + }; + + /* Remember the decission here to make sure, we will handle + * the RCPI/RSSI value correctly on WSM_EVENT_RCPI_RSS + */ + if (threshold.rssiRcpiMode & WSM_RCPI_RSSI_USE_RSSI) + priv->cqm_use_rssi = true; + + if (!priv->sdd) { + ret = request_firmware(&priv->sdd, priv->sdd_path, priv->pdev); + if (ret) { + pr_err("Can't load sdd file %s.\n", priv->sdd_path); + return ret; + } + cw1200_parse_sdd_file(priv); + } + + cfg.dpdData = priv->sdd->data; + cfg.dpdData_size = priv->sdd->size; + ret = wsm_configuration(priv, &cfg); + if (ret) + return ret; + + /* Configure RSSI/SCPI reporting as RSSI. */ + wsm_set_rcpi_rssi_threshold(priv, &threshold); + + return 0; +} + +static void cw1200_join_complete(struct cw1200_common *priv) +{ + pr_debug("[STA] Join complete (%d)\n", priv->join_complete_status); + + priv->join_pending = false; + if (priv->join_complete_status) { + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + cw1200_update_listening(priv, priv->listening); + cw1200_do_unjoin(priv); + ieee80211_connection_loss(priv->vif); + } else { + if (priv->mode == NL80211_IFTYPE_ADHOC) + priv->join_status = CW1200_JOIN_STATUS_IBSS; + else + priv->join_status = CW1200_JOIN_STATUS_PRE_STA; + } + wsm_unlock_tx(priv); /* Clearing the lock held before do_join() */ +} + +void cw1200_join_complete_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, join_complete_work); + mutex_lock(&priv->conf_mutex); + cw1200_join_complete(priv); + mutex_unlock(&priv->conf_mutex); +} + +void cw1200_join_complete_cb(struct cw1200_common *priv, + struct wsm_join_complete *arg) +{ + pr_debug("[STA] cw1200_join_complete_cb called, status=%d.\n", + arg->status); + + if (cancel_delayed_work(&priv->join_timeout)) { + priv->join_complete_status = arg->status; + queue_work(priv->workqueue, &priv->join_complete_work); + } +} + +/* MUST be called with tx_lock held! It will be unlocked for us. */ +static void cw1200_do_join(struct cw1200_common *priv) +{ + const u8 *bssid; + struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; + struct cfg80211_bss *bss = NULL; + struct wsm_protected_mgmt_policy mgmt_policy; + struct wsm_join join = { + .mode = conf->ibss_joined ? + WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS, + .preamble_type = WSM_JOIN_PREAMBLE_LONG, + .probe_for_join = 1, + .atim_window = 0, + .basic_rate_set = cw1200_rate_mask_to_wsm(priv, + conf->basic_rates), + }; + if (delayed_work_pending(&priv->join_timeout)) { + pr_warn("[STA] - Join request already pending, skipping..\n"); + wsm_unlock_tx(priv); + return; + } + + if (priv->join_status) + cw1200_do_unjoin(priv); + + bssid = priv->vif->bss_conf.bssid; + + bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel, + bssid, NULL, 0, 0, 0); + + if (!bss && !conf->ibss_joined) { + wsm_unlock_tx(priv); + return; + } + + mutex_lock(&priv->conf_mutex); + + /* Under the conf lock: check scan status and + * bail out if it is in progress. 
+ */ + if (atomic_read(&priv->scan.in_progress)) { + wsm_unlock_tx(priv); + goto done_put; + } + + priv->join_pending = true; + + /* Sanity check basic rates */ + if (!join.basic_rate_set) + join.basic_rate_set = 7; + + /* Sanity check beacon interval */ + if (!priv->beacon_int) + priv->beacon_int = 1; + + join.beacon_interval = priv->beacon_int; + + /* BT Coex related changes */ + if (priv->bt_present) { + if (((priv->conf_listen_interval * 100) % + priv->beacon_int) == 0) + priv->listen_interval = + ((priv->conf_listen_interval * 100) / + priv->beacon_int); + else + priv->listen_interval = + ((priv->conf_listen_interval * 100) / + priv->beacon_int + 1); + } + + if (priv->hw->conf.ps_dtim_period) + priv->join_dtim_period = priv->hw->conf.ps_dtim_period; + join.dtim_period = priv->join_dtim_period; + + join.channel_number = priv->channel->hw_value; + join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? + WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; + + memcpy(join.bssid, bssid, sizeof(join.bssid)); + + pr_debug("[STA] Join BSSID: %pM DTIM: %d, interval: %d\n", + join.bssid, + join.dtim_period, priv->beacon_int); + + if (!conf->ibss_joined) { + const u8 *ssidie; + rcu_read_lock(); + ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); + if (ssidie) { + join.ssid_len = ssidie[1]; + memcpy(join.ssid, &ssidie[2], join.ssid_len); + } + rcu_read_unlock(); + } + + if (priv->vif->p2p) { + join.flags |= WSM_JOIN_FLAGS_P2P_GO; + join.basic_rate_set = + cw1200_rate_mask_to_wsm(priv, 0xFF0); + } + + /* Enable asynchronous join calls */ + if (!conf->ibss_joined) { + join.flags |= WSM_JOIN_FLAGS_FORCE; + join.flags |= WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND; + } + + wsm_flush_tx(priv); + + /* Stay Awake for Join and Auth Timeouts and a bit more */ + cw1200_pm_stay_awake(&priv->pm_state, + CW1200_JOIN_TIMEOUT + CW1200_AUTH_TIMEOUT); + + cw1200_update_listening(priv, false); + + /* Turn on Block ACKs */ + wsm_set_block_ack_policy(priv, priv->ba_tx_tid_mask, + priv->ba_rx_tid_mask); + + /* Set up timeout */ + if (join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND) { + priv->join_status = CW1200_JOIN_STATUS_JOINING; + queue_delayed_work(priv->workqueue, + &priv->join_timeout, + CW1200_JOIN_TIMEOUT); + } + + /* 802.11w protected mgmt frames */ + mgmt_policy.protectedMgmtEnable = 0; + mgmt_policy.unprotectedMgmtFramesAllowed = 1; + mgmt_policy.encryptionForAuthFrame = 1; + wsm_set_protected_mgmt_policy(priv, &mgmt_policy); + + /* Perform actual join */ + if (wsm_join(priv, &join)) { + pr_err("[STA] cw1200_join_work: wsm_join failed!\n"); + cancel_delayed_work_sync(&priv->join_timeout); + cw1200_update_listening(priv, priv->listening); + /* Tx lock still held, unjoin will clear it. */ + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } else { + if (!(join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND)) + cw1200_join_complete(priv); /* Will clear tx_lock */ + + /* Upload keys */ + cw1200_upload_keys(priv); + + /* Due to beacon filtering it is possible that the + * AP's beacon is not known for the mac80211 stack. 
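The BT-coex branch above is a ceiling division: the coexistence listen time (conf_listen_interval * 100, assumed to share time units with beacon_int) is rounded up to a whole number of beacon intervals. An equivalent one-liner:

/* Equivalent form of the rounding above (positive operands assumed). */
static int example_listen_interval(int conf_listen_interval, int beacon_int)
{
        return DIV_ROUND_UP(conf_listen_interval * 100, beacon_int);
}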
+ * Disable filtering temporary to make sure the stack + * receives at least one + */ + priv->disable_beacon_filter = true; + } + cw1200_update_filtering(priv); + +done_put: + mutex_unlock(&priv->conf_mutex); + if (bss) + cfg80211_put_bss(priv->hw->wiphy, bss); +} + +void cw1200_join_timeout(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, join_timeout.work); + pr_debug("[WSM] Join timed out.\n"); + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); +} + +static void cw1200_do_unjoin(struct cw1200_common *priv) +{ + struct wsm_reset reset = { + .reset_statistics = true, + }; + + cancel_delayed_work_sync(&priv->join_timeout); + + mutex_lock(&priv->conf_mutex); + priv->join_pending = false; + + if (atomic_read(&priv->scan.in_progress)) { + if (priv->delayed_unjoin) + wiphy_dbg(priv->hw->wiphy, "Delayed unjoin is already scheduled.\n"); + else + priv->delayed_unjoin = true; + goto done; + } + + priv->delayed_link_loss = false; + + if (!priv->join_status) + goto done; + + if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { + wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", + priv->join_status); + BUG_ON(1); + } + + cancel_work_sync(&priv->update_filtering_work); + cancel_work_sync(&priv->set_beacon_wakeup_period_work); + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + + /* Unjoin is a reset. */ + wsm_flush_tx(priv); + wsm_keep_alive_period(priv, 0); + wsm_reset(priv, &reset); + wsm_set_output_power(priv, priv->output_power * 10); + priv->join_dtim_period = 0; + cw1200_setup_mac(priv); + cw1200_free_event_queue(priv); + cancel_work_sync(&priv->event_handler); + cw1200_update_listening(priv, priv->listening); + cw1200_cqm_bssloss_sm(priv, 0, 0, 0); + + /* Disable Block ACKs */ + wsm_set_block_ack_policy(priv, 0, 0); + + priv->disable_beacon_filter = false; + cw1200_update_filtering(priv); + memset(&priv->association_mode, 0, + sizeof(priv->association_mode)); + memset(&priv->bss_params, 0, sizeof(priv->bss_params)); + priv->setbssparams_done = false; + memset(&priv->firmware_ps_mode, 0, + sizeof(priv->firmware_ps_mode)); + + pr_debug("[STA] Unjoin completed.\n"); + +done: + mutex_unlock(&priv->conf_mutex); +} + +void cw1200_unjoin_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, unjoin_work); + + cw1200_do_unjoin(priv); + + /* Tell the stack we're dead */ + ieee80211_connection_loss(priv->vif); + + wsm_unlock_tx(priv); +} + +int cw1200_enable_listening(struct cw1200_common *priv) +{ + struct wsm_start start = { + .mode = WSM_START_MODE_P2P_DEV, + .band = WSM_PHY_BAND_2_4G, + .beacon_interval = 100, + .dtim_period = 1, + .probe_delay = 0, + .basic_rate_set = 0x0F, + }; + + if (priv->channel) { + start.band = priv->channel->band == IEEE80211_BAND_5GHZ ? 
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; + start.channel_number = priv->channel->hw_value; + } else { + start.band = WSM_PHY_BAND_2_4G; + start.channel_number = 1; + } + + return wsm_start(priv, &start); +} + +int cw1200_disable_listening(struct cw1200_common *priv) +{ + int ret; + struct wsm_reset reset = { + .reset_statistics = true, + }; + ret = wsm_reset(priv, &reset); + return ret; +} + +void cw1200_update_listening(struct cw1200_common *priv, bool enabled) +{ + if (enabled) { + if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) { + if (!cw1200_enable_listening(priv)) + priv->join_status = CW1200_JOIN_STATUS_MONITOR; + wsm_set_probe_responder(priv, true); + } + } else { + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { + if (!cw1200_disable_listening(priv)) + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + wsm_set_probe_responder(priv, false); + } + } +} + +int cw1200_set_uapsd_param(struct cw1200_common *priv, + const struct wsm_edca_params *arg) +{ + int ret; + u16 uapsd_flags = 0; + + /* Here's the mapping AC [queue, bit] + * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0] + */ + + if (arg->uapsd_enable[0]) + uapsd_flags |= 1 << 3; + + if (arg->uapsd_enable[1]) + uapsd_flags |= 1 << 2; + + if (arg->uapsd_enable[2]) + uapsd_flags |= 1 << 1; + + if (arg->uapsd_enable[3]) + uapsd_flags |= 1; + + /* Currently pseudo U-APSD operation is not supported, so setting + * MinAutoTriggerInterval, MaxAutoTriggerInterval and + * AutoTriggerStep to 0 + */ + + priv->uapsd_info.uapsd_flags = cpu_to_le16(uapsd_flags); + priv->uapsd_info.min_auto_trigger_interval = 0; + priv->uapsd_info.max_auto_trigger_interval = 0; + priv->uapsd_info.auto_trigger_step = 0; + + ret = wsm_set_uapsd_info(priv, &priv->uapsd_info); + return ret; +} + +/* ******************************************************************** */ +/* AP API */ + +int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_sta_priv *sta_priv = + (struct cw1200_sta_priv *)&sta->drv_priv; + struct cw1200_link_entry *entry; + struct sk_buff *skb; + + if (priv->mode != NL80211_IFTYPE_AP) + return 0; + + sta_priv->link_id = cw1200_find_link_id(priv, sta->addr); + if (WARN_ON(!sta_priv->link_id)) { + wiphy_info(priv->hw->wiphy, + "[AP] No more link IDs available.\n"); + return -ENOENT; + } + + entry = &priv->link_id_db[sta_priv->link_id - 1]; + spin_lock_bh(&priv->ps_state_lock); + if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) == + IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) + priv->sta_asleep_mask |= BIT(sta_priv->link_id); + entry->status = CW1200_LINK_HARD; + while ((skb = skb_dequeue(&entry->rx_queue))) + ieee80211_rx_irqsafe(priv->hw, skb); + spin_unlock_bh(&priv->ps_state_lock); + return 0; +} + +int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_sta_priv *sta_priv = + (struct cw1200_sta_priv *)&sta->drv_priv; + struct cw1200_link_entry *entry; + + if (priv->mode != NL80211_IFTYPE_AP || !sta_priv->link_id) + return 0; + + entry = &priv->link_id_db[sta_priv->link_id - 1]; + spin_lock_bh(&priv->ps_state_lock); + entry->status = CW1200_LINK_RESERVE; + entry->timestamp = jiffies; + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + spin_unlock_bh(&priv->ps_state_lock); + flush_workqueue(priv->workqueue); + return 0; +} + +static void __cw1200_sta_notify(struct 
ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + int link_id) +{ + struct cw1200_common *priv = dev->priv; + u32 bit, prev; + + /* Zero link id means "for all link IDs" */ + if (link_id) + bit = BIT(link_id); + else if (WARN_ON_ONCE(notify_cmd != STA_NOTIFY_AWAKE)) + bit = 0; + else + bit = priv->link_id_map; + prev = priv->sta_asleep_mask & bit; + + switch (notify_cmd) { + case STA_NOTIFY_SLEEP: + if (!prev) { + if (priv->buffered_multicasts && + !priv->sta_asleep_mask) + queue_work(priv->workqueue, + &priv->multicast_start_work); + priv->sta_asleep_mask |= bit; + } + break; + case STA_NOTIFY_AWAKE: + if (prev) { + priv->sta_asleep_mask &= ~bit; + priv->pspoll_mask &= ~bit; + if (priv->tx_multicast && link_id && + !priv->sta_asleep_mask) + queue_work(priv->workqueue, + &priv->multicast_stop_work); + cw1200_bh_wakeup(priv); + } + break; + } +} + +void cw1200_sta_notify(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + struct ieee80211_sta *sta) +{ + struct cw1200_common *priv = dev->priv; + struct cw1200_sta_priv *sta_priv = + (struct cw1200_sta_priv *)&sta->drv_priv; + + spin_lock_bh(&priv->ps_state_lock); + __cw1200_sta_notify(dev, vif, notify_cmd, sta_priv->link_id); + spin_unlock_bh(&priv->ps_state_lock); +} + +static void cw1200_ps_notify(struct cw1200_common *priv, + int link_id, bool ps) +{ + if (link_id > CW1200_MAX_STA_IN_AP_MODE) + return; + + pr_debug("%s for LinkId: %d. STAs asleep: %.8X\n", + ps ? "Stop" : "Start", + link_id, priv->sta_asleep_mask); + + __cw1200_sta_notify(priv->hw, priv->vif, + ps ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, link_id); +} + +static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set) +{ + struct sk_buff *skb; + struct wsm_update_ie update_ie = { + .what = WSM_UPDATE_IE_BEACON, + .count = 1, + }; + u16 tim_offset, tim_length; + + pr_debug("[AP] mcast: %s.\n", aid0_bit_set ? "ena" : "dis"); + + skb = ieee80211_beacon_get_tim(priv->hw, priv->vif, + &tim_offset, &tim_length); + if (!skb) { + if (!__cw1200_flush(priv, true)) + wsm_unlock_tx(priv); + return -ENOENT; + } + + if (tim_offset && tim_length >= 6) { + /* Ignore DTIM count from mac80211: + * firmware handles DTIM internally. + */ + skb->data[tim_offset + 2] = 0; + + /* Set/reset aid0 bit */ + if (aid0_bit_set) + skb->data[tim_offset + 4] |= 1; + else + skb->data[tim_offset + 4] &= ~1; + } + + update_ie.ies = &skb->data[tim_offset]; + update_ie.length = tim_length; + wsm_update_ie(priv, &update_ie); + + dev_kfree_skb(skb); + + return 0; +} + +void cw1200_set_tim_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, set_tim_work); + (void)cw1200_set_tim_impl(priv, priv->aid0_bit_set); +} + +int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, + bool set) +{ + struct cw1200_common *priv = dev->priv; + queue_work(priv->workqueue, &priv->set_tim_work); + return 0; +} + +void cw1200_set_cts_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, set_cts_work); + + u8 erp_ie[3] = {WLAN_EID_ERP_INFO, 0x1, 0}; + struct wsm_update_ie update_ie = { + .what = WSM_UPDATE_IE_BEACON, + .count = 1, + .ies = erp_ie, + .length = 3, + }; + u32 erp_info; + __le32 use_cts_prot; + mutex_lock(&priv->conf_mutex); + erp_info = priv->erp_info; + mutex_unlock(&priv->conf_mutex); + use_cts_prot = + erp_info & WLAN_ERP_USE_PROTECTION ? 
+ __cpu_to_le32(1) : 0; + + erp_ie[ERP_INFO_BYTE_OFFSET] = erp_info; + + pr_debug("[STA] ERP information 0x%x\n", erp_info); + + wsm_write_mib(priv, WSM_MIB_ID_NON_ERP_PROTECTION, + &use_cts_prot, sizeof(use_cts_prot)); + wsm_update_ie(priv, &update_ie); + + return; +} + +static int cw1200_set_btcoexinfo(struct cw1200_common *priv) +{ + struct wsm_override_internal_txrate arg; + int ret = 0; + + if (priv->mode == NL80211_IFTYPE_STATION) { + /* Plumb PSPOLL and NULL template */ + cw1200_upload_pspoll(priv); + cw1200_upload_null(priv); + cw1200_upload_qosnull(priv); + } else { + return 0; + } + + memset(&arg, 0, sizeof(struct wsm_override_internal_txrate)); + + if (!priv->vif->p2p) { + /* STATION mode */ + if (priv->bss_params.operational_rate_set & ~0xF) { + pr_debug("[STA] STA has ERP rates\n"); + /* G or BG mode */ + arg.internalTxRate = (__ffs( + priv->bss_params.operational_rate_set & ~0xF)); + } else { + pr_debug("[STA] STA has non ERP rates\n"); + /* B only mode */ + arg.internalTxRate = (__ffs(priv->association_mode.basic_rate_set)); + } + arg.nonErpInternalTxRate = (__ffs(priv->association_mode.basic_rate_set)); + } else { + /* P2P mode */ + arg.internalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF)); + arg.nonErpInternalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF)); + } + + pr_debug("[STA] BTCOEX_INFO MODE %d, internalTxRate : %x, nonErpInternalTxRate: %x\n", + priv->mode, + arg.internalTxRate, + arg.nonErpInternalTxRate); + + ret = wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE, + &arg, sizeof(arg)); + + return ret; +} + +void cw1200_bss_info_changed(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct cw1200_common *priv = dev->priv; + bool do_join = false; + + mutex_lock(&priv->conf_mutex); + + pr_debug("BSS CHANGED: %08x\n", changed); + + /* TODO: BSS_CHANGED_QOS */ + /* TODO: BSS_CHANGED_TXPOWER */ + + if (changed & BSS_CHANGED_ARP_FILTER) { + struct wsm_mib_arp_ipv4_filter filter = {0}; + int i; + + pr_debug("[STA] BSS_CHANGED_ARP_FILTER cnt: %d\n", + info->arp_addr_cnt); + + /* Currently only one IP address is supported by firmware. + * In case of more IPs arp filtering will be disabled. 
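cw1200_set_btcoexinfo() above derives the internal TX rate from the lowest set bit of the operational rate set, masking off the low nibble that holds the 11b rates. A minimal standalone illustration (lowest_erp_rate() and the sample bitmask are invented; __builtin_ctz() stands in for the kernel's __ffs()):

#include <stdio.h>
#include <stdint.h>

/* Sketch only: pick the lowest ERP rate index the way __ffs() is used in
 * cw1200_set_btcoexinfo(); bits 0..3 are the 11b rates and are masked off. */
static unsigned int lowest_erp_rate(uint32_t operational_rate_set)
{
        uint32_t erp = operational_rate_set & ~0xFu;

        if (!erp)
                return 0; /* B-only peer: caller falls back to basic rates */
        return (unsigned int)__builtin_ctz(erp);
}

int main(void)
{
        /* Bits 2 and 6 set: the lowest non-11b index is 6. */
        printf("%u\n", lowest_erp_rate((1u << 2) | (1u << 6)));
        return 0;
}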
+ */ + if (info->arp_addr_cnt > 0 && + info->arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) { + for (i = 0; i < info->arp_addr_cnt; i++) { + filter.ipv4addrs[i] = info->arp_addr_list[i]; + pr_debug("[STA] addr[%d]: 0x%X\n", + i, filter.ipv4addrs[i]); + } + filter.enable = __cpu_to_le32(1); + } + + pr_debug("[STA] arp ip filter enable: %d\n", + __le32_to_cpu(filter.enable)); + + wsm_set_arp_ipv4_filter(priv, &filter); + } + + if (changed & + (BSS_CHANGED_BEACON | + BSS_CHANGED_AP_PROBE_RESP | + BSS_CHANGED_BSSID | + BSS_CHANGED_SSID | + BSS_CHANGED_IBSS)) { + pr_debug("BSS_CHANGED_BEACON\n"); + priv->beacon_int = info->beacon_int; + cw1200_update_beaconing(priv); + cw1200_upload_beacon(priv); + } + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + pr_debug("BSS_CHANGED_BEACON_ENABLED (%d)\n", info->enable_beacon); + + if (priv->enable_beacon != info->enable_beacon) { + cw1200_enable_beaconing(priv, info->enable_beacon); + priv->enable_beacon = info->enable_beacon; + } + } + + if (changed & BSS_CHANGED_BEACON_INT) { + pr_debug("CHANGED_BEACON_INT\n"); + if (info->ibss_joined) + do_join = true; + else if (priv->join_status == CW1200_JOIN_STATUS_AP) + cw1200_update_beaconing(priv); + } + + /* assoc/disassoc, or maybe AID changed */ + if (changed & BSS_CHANGED_ASSOC) { + wsm_lock_tx(priv); + priv->wep_default_key_id = -1; + wsm_unlock_tx(priv); + } + + if (changed & BSS_CHANGED_BSSID) { + pr_debug("BSS_CHANGED_BSSID\n"); + do_join = true; + } + + if (changed & + (BSS_CHANGED_ASSOC | + BSS_CHANGED_BSSID | + BSS_CHANGED_IBSS | + BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_HT)) { + pr_debug("BSS_CHANGED_ASSOC\n"); + if (info->assoc) { + if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) { + ieee80211_connection_loss(vif); + mutex_unlock(&priv->conf_mutex); + return; + } else if (priv->join_status == CW1200_JOIN_STATUS_PRE_STA) { + priv->join_status = CW1200_JOIN_STATUS_STA; + } + } else { + do_join = true; + } + + if (info->assoc || info->ibss_joined) { + struct ieee80211_sta *sta = NULL; + u32 val = 0; + + if (info->dtim_period) + priv->join_dtim_period = info->dtim_period; + priv->beacon_int = info->beacon_int; + + rcu_read_lock(); + + if (info->bssid && !info->ibss_joined) + sta = ieee80211_find_sta(vif, info->bssid); + if (sta) { + priv->ht_info.ht_cap = sta->ht_cap; + priv->bss_params.operational_rate_set = + cw1200_rate_mask_to_wsm(priv, + sta->supp_rates[priv->channel->band]); + priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef); + priv->ht_info.operation_mode = info->ht_operation_mode; + } else { + memset(&priv->ht_info, 0, + sizeof(priv->ht_info)); + priv->bss_params.operational_rate_set = -1; + } + rcu_read_unlock(); + + /* Non Greenfield stations present */ + if (priv->ht_info.operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) + val |= WSM_NON_GREENFIELD_STA_PRESENT; + + /* Set HT protection method */ + val |= (priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION) << 2; + + /* TODO: + * STBC_param.dual_cts + * STBC_param.LSIG_TXOP_FILL + */ + + val = cpu_to_le32(val); + wsm_write_mib(priv, WSM_MIB_ID_SET_HT_PROTECTION, + &val, sizeof(val)); + + priv->association_mode.greenfield = + cw1200_ht_greenfield(&priv->ht_info); + priv->association_mode.flags = + WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES | + WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE | + WSM_ASSOCIATION_MODE_USE_HT_MODE | + WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET | + WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING; + priv->association_mode.preamble = + info->use_short_preamble ? 
+ WSM_JOIN_PREAMBLE_SHORT : + WSM_JOIN_PREAMBLE_LONG; + priv->association_mode.basic_rate_set = __cpu_to_le32( + cw1200_rate_mask_to_wsm(priv, + info->basic_rates)); + priv->association_mode.mpdu_start_spacing = + cw1200_ht_ampdu_density(&priv->ht_info); + + cw1200_cqm_bssloss_sm(priv, 0, 0, 0); + cancel_work_sync(&priv->unjoin_work); + + priv->bss_params.beacon_lost_count = priv->cqm_beacon_loss_count; + priv->bss_params.aid = info->aid; + + if (priv->join_dtim_period < 1) + priv->join_dtim_period = 1; + + pr_debug("[STA] DTIM %d, interval: %d\n", + priv->join_dtim_period, priv->beacon_int); + pr_debug("[STA] Preamble: %d, Greenfield: %d, Aid: %d, Rates: 0x%.8X, Basic: 0x%.8X\n", + priv->association_mode.preamble, + priv->association_mode.greenfield, + priv->bss_params.aid, + priv->bss_params.operational_rate_set, + priv->association_mode.basic_rate_set); + wsm_set_association_mode(priv, &priv->association_mode); + + if (!info->ibss_joined) { + wsm_keep_alive_period(priv, 30 /* sec */); + wsm_set_bss_params(priv, &priv->bss_params); + priv->setbssparams_done = true; + cw1200_set_beacon_wakeup_period_work(&priv->set_beacon_wakeup_period_work); + cw1200_set_pm(priv, &priv->powersave_mode); + } + if (priv->vif->p2p) { + pr_debug("[STA] Setting p2p powersave configuration.\n"); + wsm_set_p2p_ps_modeinfo(priv, + &priv->p2p_ps_modeinfo); + } + if (priv->bt_present) + cw1200_set_btcoexinfo(priv); + } else { + memset(&priv->association_mode, 0, + sizeof(priv->association_mode)); + memset(&priv->bss_params, 0, sizeof(priv->bss_params)); + } + } + + /* ERP Protection */ + if (changed & (BSS_CHANGED_ASSOC | + BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_ERP_PREAMBLE)) { + u32 prev_erp_info = priv->erp_info; + if (info->use_cts_prot) + priv->erp_info |= WLAN_ERP_USE_PROTECTION; + else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT)) + priv->erp_info &= ~WLAN_ERP_USE_PROTECTION; + + if (info->use_short_preamble) + priv->erp_info |= WLAN_ERP_BARKER_PREAMBLE; + else + priv->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE; + + pr_debug("[STA] ERP Protection: %x\n", priv->erp_info); + + if (prev_erp_info != priv->erp_info) + queue_work(priv->workqueue, &priv->set_cts_work); + } + + /* ERP Slottime */ + if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT)) { + __le32 slot_time = info->use_short_slot ? + __cpu_to_le32(9) : __cpu_to_le32(20); + pr_debug("[STA] Slot time: %d us.\n", + __le32_to_cpu(slot_time)); + wsm_write_mib(priv, WSM_MIB_ID_DOT11_SLOT_TIME, + &slot_time, sizeof(slot_time)); + } + + if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) { + struct wsm_rcpi_rssi_threshold threshold = { + .rollingAverageCount = 8, + }; + pr_debug("[CQM] RSSI threshold subscribe: %d +- %d\n", + info->cqm_rssi_thold, info->cqm_rssi_hyst); + priv->cqm_rssi_thold = info->cqm_rssi_thold; + priv->cqm_rssi_hyst = info->cqm_rssi_hyst; + + if (info->cqm_rssi_thold || info->cqm_rssi_hyst) { + /* RSSI subscription enabled */ + /* TODO: It's not a correct way of setting threshold. + * Upper and lower must be set equal here and adjusted + * in callback. However current implementation is much + * more relaible and stable. 
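The threshold programming that follows works in RCPI units (unsigned Q7.1, RSSI = RCPI / 2 - 110) unless RSSI mode was selected at startup via cqm_use_rssi. A standalone sketch of the dBm/RCPI mapping (helper names and sample values are invented, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Sketch only: dBm <-> RCPI as used for the threshold programming below
 * (RCPI is unsigned Q7.1, so one step is half a dB). */
static uint8_t rssi_dbm_to_rcpi(int rssi_dbm)
{
        return (uint8_t)((rssi_dbm + 110) * 2);
}

static int rcpi_to_rssi_dbm(uint8_t rcpi)
{
        return rcpi / 2 - 110;
}

int main(void)
{
        int thold = -65, hyst = 5;      /* sample CQM threshold/hysteresis */

        printf("lower=%u upper=%u\n",
               (unsigned)rssi_dbm_to_rcpi(thold),
               (unsigned)rssi_dbm_to_rcpi(thold + hyst));
        printf("back: %d dBm\n", rcpi_to_rssi_dbm(rssi_dbm_to_rcpi(thold)));
        return 0;
}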
+ */ + + /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 + * RSSI = RCPI / 2 - 110 + */ + if (priv->cqm_use_rssi) { + threshold.upperThreshold = + info->cqm_rssi_thold + info->cqm_rssi_hyst; + threshold.lowerThreshold = + info->cqm_rssi_thold; + threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI; + } else { + threshold.upperThreshold = (info->cqm_rssi_thold + info->cqm_rssi_hyst + 110) * 2; + threshold.lowerThreshold = (info->cqm_rssi_thold + 110) * 2; + } + threshold.rssiRcpiMode |= WSM_RCPI_RSSI_THRESHOLD_ENABLE; + } else { + /* There is a bug in FW, see sta.c. We have to enable + * dummy subscription to get correct RSSI values. + */ + threshold.rssiRcpiMode |= + WSM_RCPI_RSSI_THRESHOLD_ENABLE | + WSM_RCPI_RSSI_DONT_USE_UPPER | + WSM_RCPI_RSSI_DONT_USE_LOWER; + if (priv->cqm_use_rssi) + threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI; + } + wsm_set_rcpi_rssi_threshold(priv, &threshold); + } + mutex_unlock(&priv->conf_mutex); + + if (do_join) { + wsm_lock_tx(priv); + cw1200_do_join(priv); /* Will unlock it for us */ + } +} + +void cw1200_multicast_start_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, multicast_start_work); + long tmo = priv->join_dtim_period * + (priv->beacon_int + 20) * HZ / 1024; + + cancel_work_sync(&priv->multicast_stop_work); + + if (!priv->aid0_bit_set) { + wsm_lock_tx(priv); + cw1200_set_tim_impl(priv, true); + priv->aid0_bit_set = true; + mod_timer(&priv->mcast_timeout, jiffies + tmo); + wsm_unlock_tx(priv); + } +} + +void cw1200_multicast_stop_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, multicast_stop_work); + + if (priv->aid0_bit_set) { + del_timer_sync(&priv->mcast_timeout); + wsm_lock_tx(priv); + priv->aid0_bit_set = false; + cw1200_set_tim_impl(priv, false); + wsm_unlock_tx(priv); + } +} + +void cw1200_mcast_timeout(unsigned long arg) +{ + struct cw1200_common *priv = + (struct cw1200_common *)arg; + + wiphy_warn(priv->hw->wiphy, + "Multicast delivery timeout.\n"); + spin_lock_bh(&priv->ps_state_lock); + priv->tx_multicast = priv->aid0_bit_set && + priv->buffered_multicasts; + if (priv->tx_multicast) + cw1200_bh_wakeup(priv); + spin_unlock_bh(&priv->ps_state_lock); +} + +int cw1200_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) +{ + /* Aggregation is implemented fully in firmware, + * including block ack negotiation. Do not allow + * mac80211 stack to do anything: it interferes with + * the firmware. + */ + + /* Note that we still need this function stubbed. */ + return -ENOTSUPP; +} + +/* ******************************************************************** */ +/* WSM callback */ +void cw1200_suspend_resume(struct cw1200_common *priv, + struct wsm_suspend_resume *arg) +{ + pr_debug("[AP] %s: %s\n", + arg->stop ? "stop" : "start", + arg->multicast ? "broadcast" : "unicast"); + + if (arg->multicast) { + bool cancel_tmo = false; + spin_lock_bh(&priv->ps_state_lock); + if (arg->stop) { + priv->tx_multicast = false; + } else { + /* Firmware sends this indication every DTIM if there + * is a STA in powersave connected. There is no reason + * to suspend, following wakeup will consume much more + * power than it could be saved. 
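The multicast timeout above and the stay-awake period just below both scale the DTIM beacon interval into jiffies as dtim_period * (beacon_int + 20) * HZ / 1024, i.e. beacon_int (nominally in TU, about 1.024 ms) is treated as milliseconds and /1024 approximates /1000. A standalone sketch of that estimate (EX_HZ and the helper name are invented):

#include <stdio.h>

#define EX_HZ 1000      /* stand-in for the kernel's HZ */

/* Sketch only: the DTIM-interval-to-jiffies estimate used for the multicast
 * timeout and the stay-awake period; the +20 slack and the /1024 divisor
 * follow the driver code around here. */
static long dtim_timeout_jiffies(int dtim_period, int beacon_int)
{
        return (long)dtim_period * (beacon_int + 20) * EX_HZ / 1024;
}

int main(void)
{
        /* DTIM 2, 100 TU beacons, HZ=1000 -> about 234 jiffies (~234 ms). */
        printf("%ld\n", dtim_timeout_jiffies(2, 100));
        return 0;
}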
+ */ + cw1200_pm_stay_awake(&priv->pm_state, + priv->join_dtim_period * + (priv->beacon_int + 20) * HZ / 1024); + priv->tx_multicast = (priv->aid0_bit_set && + priv->buffered_multicasts); + if (priv->tx_multicast) { + cancel_tmo = true; + cw1200_bh_wakeup(priv); + } + } + spin_unlock_bh(&priv->ps_state_lock); + if (cancel_tmo) + del_timer_sync(&priv->mcast_timeout); + } else { + spin_lock_bh(&priv->ps_state_lock); + cw1200_ps_notify(priv, arg->link_id, arg->stop); + spin_unlock_bh(&priv->ps_state_lock); + if (!arg->stop) + cw1200_bh_wakeup(priv); + } + return; +} + +/* ******************************************************************** */ +/* AP privates */ + +static int cw1200_upload_beacon(struct cw1200_common *priv) +{ + int ret = 0; + struct ieee80211_mgmt *mgmt; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_BEACON, + }; + + u16 tim_offset; + u16 tim_len; + + if (priv->mode == NL80211_IFTYPE_STATION || + priv->mode == NL80211_IFTYPE_MONITOR || + priv->mode == NL80211_IFTYPE_UNSPECIFIED) + goto done; + + if (priv->vif->p2p) + frame.rate = WSM_TRANSMIT_RATE_6; + + frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif, + &tim_offset, &tim_len); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + if (ret) + goto done; + + /* TODO: Distill probe resp; remove TIM + * and any other beacon-specific IEs + */ + mgmt = (void *)frame.skb->data; + mgmt->frame_control = + __cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + + frame.frame_type = WSM_FRAME_TYPE_PROBE_RESPONSE; + if (priv->vif->p2p) { + ret = wsm_set_probe_responder(priv, true); + } else { + ret = wsm_set_template_frame(priv, &frame); + wsm_set_probe_responder(priv, false); + } + +done: + dev_kfree_skb(frame.skb); + + return ret; +} + +static int cw1200_upload_pspoll(struct cw1200_common *priv) +{ + int ret = 0; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_PS_POLL, + .rate = 0xFF, + }; + + + frame.skb = ieee80211_pspoll_get(priv->hw, priv->vif); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + dev_kfree_skb(frame.skb); + + return ret; +} + +static int cw1200_upload_null(struct cw1200_common *priv) +{ + int ret = 0; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_NULL, + .rate = 0xFF, + }; + + frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + dev_kfree_skb(frame.skb); + + return ret; +} + +static int cw1200_upload_qosnull(struct cw1200_common *priv) +{ + int ret = 0; + /* TODO: This needs to be implemented + + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_QOS_NULL, + .rate = 0xFF, + }; + + frame.skb = ieee80211_qosnullfunc_get(priv->hw, priv->vif); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + dev_kfree_skb(frame.skb); + + */ + return ret; +} + +static int cw1200_enable_beaconing(struct cw1200_common *priv, + bool enable) +{ + struct wsm_beacon_transmit transmit = { + .enable_beaconing = enable, + }; + + return wsm_beacon_transmit(priv, &transmit); +} + +static int cw1200_start_ap(struct cw1200_common *priv) +{ + int ret; + struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; + struct wsm_start start = { + .mode = priv->vif->p2p ? + WSM_START_MODE_P2P_GO : WSM_START_MODE_AP, + .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? 
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G, + .channel_number = priv->channel->hw_value, + .beacon_interval = conf->beacon_int, + .dtim_period = conf->dtim_period, + .preamble = conf->use_short_preamble ? + WSM_JOIN_PREAMBLE_SHORT : + WSM_JOIN_PREAMBLE_LONG, + .probe_delay = 100, + .basic_rate_set = cw1200_rate_mask_to_wsm(priv, + conf->basic_rates), + }; + struct wsm_operational_mode mode = { + .power_mode = cw1200_power_mode, + .disable_more_flag_usage = true, + }; + + memset(start.ssid, 0, sizeof(start.ssid)); + if (!conf->hidden_ssid) { + start.ssid_len = conf->ssid_len; + memcpy(start.ssid, conf->ssid, start.ssid_len); + } + + priv->beacon_int = conf->beacon_int; + priv->join_dtim_period = conf->dtim_period; + + memset(&priv->link_id_db, 0, sizeof(priv->link_id_db)); + + pr_debug("[AP] ch: %d(%d), bcn: %d(%d), brt: 0x%.8X, ssid: %.*s.\n", + start.channel_number, start.band, + start.beacon_interval, start.dtim_period, + start.basic_rate_set, + start.ssid_len, start.ssid); + ret = wsm_start(priv, &start); + if (!ret) + ret = cw1200_upload_keys(priv); + if (!ret && priv->vif->p2p) { + pr_debug("[AP] Setting p2p powersave configuration.\n"); + wsm_set_p2p_ps_modeinfo(priv, &priv->p2p_ps_modeinfo); + } + if (!ret) { + wsm_set_block_ack_policy(priv, 0, 0); + priv->join_status = CW1200_JOIN_STATUS_AP; + cw1200_update_filtering(priv); + } + wsm_set_operational_mode(priv, &mode); + return ret; +} + +static int cw1200_update_beaconing(struct cw1200_common *priv) +{ + struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; + struct wsm_reset reset = { + .link_id = 0, + .reset_statistics = true, + }; + + if (priv->mode == NL80211_IFTYPE_AP) { + /* TODO: check if changed channel, band */ + if (priv->join_status != CW1200_JOIN_STATUS_AP || + priv->beacon_int != conf->beacon_int) { + pr_debug("ap restarting\n"); + wsm_lock_tx(priv); + if (priv->join_status != CW1200_JOIN_STATUS_PASSIVE) + wsm_reset(priv, &reset); + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + cw1200_start_ap(priv); + wsm_unlock_tx(priv); + } else + pr_debug("ap started join_status: %d\n", + priv->join_status); + } + return 0; +} diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h new file mode 100644 index 000000000000..35babb62cc6a --- /dev/null +++ b/drivers/net/wireless/cw1200/sta.h @@ -0,0 +1,123 @@ +/* + * Mac80211 STA interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef STA_H_INCLUDED +#define STA_H_INCLUDED + +/* ******************************************************************** */ +/* mac80211 API */ + +int cw1200_start(struct ieee80211_hw *dev); +void cw1200_stop(struct ieee80211_hw *dev); +int cw1200_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif); +void cw1200_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif); +int cw1200_change_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, + bool p2p); +int cw1200_config(struct ieee80211_hw *dev, u32 changed); +void cw1200_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast); +int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +int cw1200_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats); +int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); + +int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value); + +void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop); + +u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list); + +int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg); + +/* ******************************************************************** */ +/* WSM callbacks */ + +void cw1200_join_complete_cb(struct cw1200_common *priv, + struct wsm_join_complete *arg); + +/* ******************************************************************** */ +/* WSM events */ + +void cw1200_free_event_queue(struct cw1200_common *priv); +void cw1200_event_handler(struct work_struct *work); +void cw1200_bss_loss_work(struct work_struct *work); +void cw1200_bss_params_work(struct work_struct *work); +void cw1200_keep_alive_work(struct work_struct *work); +void cw1200_tx_failure_work(struct work_struct *work); + +void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, int init, int good, + int bad); +static inline void cw1200_cqm_bssloss_sm(struct cw1200_common *priv, + int init, int good, int bad) +{ + spin_lock(&priv->bss_loss_lock); + __cw1200_cqm_bssloss_sm(priv, init, good, bad); + spin_unlock(&priv->bss_loss_lock); +} + +/* ******************************************************************** */ +/* Internal API */ + +int cw1200_setup_mac(struct cw1200_common *priv); +void cw1200_join_timeout(struct work_struct *work); +void cw1200_unjoin_work(struct work_struct *work); +void cw1200_join_complete_work(struct work_struct *work); +void cw1200_wep_key_work(struct work_struct *work); +void cw1200_update_listening(struct cw1200_common *priv, bool enabled); +void cw1200_update_filtering(struct cw1200_common *priv); +void cw1200_update_filtering_work(struct work_struct *work); +void cw1200_set_beacon_wakeup_period_work(struct work_struct *work); +int cw1200_enable_listening(struct cw1200_common *priv); +int cw1200_disable_listening(struct cw1200_common *priv); +int cw1200_set_uapsd_param(struct cw1200_common *priv, + const struct wsm_edca_params *arg); +void cw1200_ba_work(struct work_struct *work); +void cw1200_ba_timer(unsigned long arg); + +/* AP stuffs */ +int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, + bool set); +int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int cw1200_sta_remove(struct 
ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + struct ieee80211_sta *sta); +void cw1200_bss_info_changed(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed); +int cw1200_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); + +void cw1200_suspend_resume(struct cw1200_common *priv, + struct wsm_suspend_resume *arg); +void cw1200_set_tim_work(struct work_struct *work); +void cw1200_set_cts_work(struct work_struct *work); +void cw1200_multicast_start_work(struct work_struct *work); +void cw1200_multicast_stop_work(struct work_struct *work); +void cw1200_mcast_timeout(unsigned long arg); + +#endif diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c new file mode 100644 index 000000000000..0e40890e1993 --- /dev/null +++ b/drivers/net/wireless/cw1200/txrx.c @@ -0,0 +1,1474 @@ +/* + * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include "cw1200.h" +#include "wsm.h" +#include "bh.h" +#include "sta.h" +#include "debug.h" + +#define CW1200_INVALID_RATE_ID (0xFF) + +static int cw1200_handle_action_rx(struct cw1200_common *priv, + struct sk_buff *skb); +static const struct ieee80211_rate * +cw1200_get_tx_rate(const struct cw1200_common *priv, + const struct ieee80211_tx_rate *rate); + +/* ******************************************************************** */ +/* TX queue lock / unlock */ + +static inline void cw1200_tx_queues_lock(struct cw1200_common *priv) +{ + int i; + for (i = 0; i < 4; ++i) + cw1200_queue_lock(&priv->tx_queue[i]); +} + +static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv) +{ + int i; + for (i = 0; i < 4; ++i) + cw1200_queue_unlock(&priv->tx_queue[i]); +} + +/* ******************************************************************** */ +/* TX policy cache implementation */ + +static void tx_policy_dump(struct tx_policy *policy) +{ + pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n", + policy->raw[0] & 0x0F, policy->raw[0] >> 4, + policy->raw[1] & 0x0F, policy->raw[1] >> 4, + policy->raw[2] & 0x0F, policy->raw[2] >> 4, + policy->raw[3] & 0x0F, policy->raw[3] >> 4, + policy->raw[4] & 0x0F, policy->raw[4] >> 4, + policy->raw[5] & 0x0F, policy->raw[5] >> 4, + policy->raw[6] & 0x0F, policy->raw[6] >> 4, + policy->raw[7] & 0x0F, policy->raw[7] >> 4, + policy->raw[8] & 0x0F, policy->raw[8] >> 4, + policy->raw[9] & 0x0F, policy->raw[9] >> 4, + policy->raw[10] & 0x0F, policy->raw[10] >> 4, + policy->raw[11] & 0x0F, policy->raw[11] >> 4, + policy->defined); +} + +static void tx_policy_build(const struct cw1200_common *priv, + /* [out] */ struct tx_policy *policy, + struct ieee80211_tx_rate *rates, size_t count) +{ + int i, j; + unsigned limit = priv->short_frame_max_tx_count; + unsigned total = 0; + BUG_ON(rates[0].idx < 0); + memset(policy, 0, sizeof(*policy)); + + /* minstrel is buggy a little bit, so distille + * incoming rates first. 
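The "distilling" performed next first bubbles larger rate indices forward and then folds duplicate indices together before retry counts are assigned. A trimmed-down standalone sketch of those two passes (struct tx_rate, sort_and_merge() and the sample table are invented; the driver's early exit for still-out-of-order entries and its total-count bookkeeping are omitted):

#include <stdio.h>
#include <stddef.h>

struct tx_rate { int idx; int count; };

/* Sketch only: single forward pass pushing larger indices up, then a merge
 * of adjacent entries that share the same index, as tx_policy_build() does
 * with the minstrel-provided rate table. */
static size_t sort_and_merge(struct tx_rate *rates, size_t count)
{
        size_t i, j;

        for (i = 1; i < count; ++i) {
                if (rates[i].idx < 0) {         /* unset entry ends the table */
                        count = i;
                        break;
                }
                if (rates[i].idx > rates[i - 1].idx) {
                        struct tx_rate tmp = rates[i - 1];
                        rates[i - 1] = rates[i];
                        rates[i] = tmp;
                }
        }

        for (i = 0, j = 1; j < count; ++j) {
                if (rates[j].idx == rates[i].idx) {
                        rates[i].count += rates[j].count;
                } else {
                        ++i;
                        if (i != j)
                                rates[i] = rates[j];
                }
        }
        return count ? i + 1 : 0;
}

int main(void)
{
        struct tx_rate r[] = { {11, 2}, {11, 1}, {4, 3}, {-1, 0} };
        size_t n = sort_and_merge(r, 4);

        for (size_t k = 0; k < n; ++k)
                printf("idx=%d count=%d\n", r[k].idx, r[k].count);
        return 0;
}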
*/ + + /* Sort rates in descending order. */ + for (i = 1; i < count; ++i) { + if (rates[i].idx < 0) { + count = i; + break; + } + if (rates[i].idx > rates[i - 1].idx) { + struct ieee80211_tx_rate tmp = rates[i - 1]; + rates[i - 1] = rates[i]; + rates[i] = tmp; + } + } + + /* Eliminate duplicates. */ + total = rates[0].count; + for (i = 0, j = 1; j < count; ++j) { + if (rates[j].idx == rates[i].idx) { + rates[i].count += rates[j].count; + } else if (rates[j].idx > rates[i].idx) { + break; + } else { + ++i; + if (i != j) + rates[i] = rates[j]; + } + total += rates[j].count; + } + count = i + 1; + + /* Re-fill policy trying to keep every requested rate and with + * respect to the global max tx retransmission count. */ + if (limit < count) + limit = count; + if (total > limit) { + for (i = 0; i < count; ++i) { + int left = count - i - 1; + if (rates[i].count > limit - left) + rates[i].count = limit - left; + limit -= rates[i].count; + } + } + + /* HACK!!! Device has problems (at least) switching from + * 54Mbps CTS to 1Mbps. This switch takes enormous amount + * of time (100-200 ms), leading to valuable throughput drop. + * As a workaround, additional g-rates are injected to the + * policy. + */ + if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) && + rates[0].idx > 4 && rates[0].count > 2 && + rates[1].idx < 2) { + /* ">> 1" is an equivalent of "/ 2", but faster */ + int mid_rate = (rates[0].idx + 4) >> 1; + + /* Decrease number of retries for the initial rate */ + rates[0].count -= 2; + + if (mid_rate != 4) { + /* Keep fallback rate at 1Mbps. */ + rates[3] = rates[1]; + + /* Inject 1 transmission on lowest g-rate */ + rates[2].idx = 4; + rates[2].count = 1; + rates[2].flags = rates[1].flags; + + /* Inject 1 transmission on mid-rate */ + rates[1].idx = mid_rate; + rates[1].count = 1; + + /* Fallback to 1 Mbps is a really bad thing, + * so let's try to increase probability of + * successful transmission on the lowest g rate + * even more */ + if (rates[0].count >= 3) { + --rates[0].count; + ++rates[2].count; + } + + /* Adjust amount of rates defined */ + count += 2; + } else { + /* Keep fallback rate at 1Mbps. */ + rates[2] = rates[1]; + + /* Inject 2 transmissions on lowest g-rate */ + rates[1].idx = 4; + rates[1].count = 2; + + /* Adjust amount of rates defined */ + count += 1; + } + } + + policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1; + + for (i = 0; i < count; ++i) { + register unsigned rateid, off, shift, retries; + + rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value; + off = rateid >> 3; /* eq. rateid / 8 */ + shift = (rateid & 0x07) << 2; /* eq. 
(rateid % 8) * 4 */ + + retries = rates[i].count; + if (retries > 0x0F) { + rates[i].count = 0x0f; + retries = 0x0F; + } + policy->tbl[off] |= __cpu_to_le32(retries << shift); + policy->retry_count += retries; + } + + pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d, %d:%d\n", + count, + rates[0].idx, rates[0].count, + rates[1].idx, rates[1].count, + rates[2].idx, rates[2].count, + rates[3].idx, rates[3].count, + rates[4].idx, rates[4].count); +} + +static inline bool tx_policy_is_equal(const struct tx_policy *wanted, + const struct tx_policy *cached) +{ + size_t count = wanted->defined >> 1; + if (wanted->defined > cached->defined) + return false; + if (count) { + if (memcmp(wanted->raw, cached->raw, count)) + return false; + } + if (wanted->defined & 1) { + if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F)) + return false; + } + return true; +} + +static int tx_policy_find(struct tx_policy_cache *cache, + const struct tx_policy *wanted) +{ + /* O(n) complexity. Not so good, but there's only 8 entries in + * the cache. + * Also lru helps to reduce search time. */ + struct tx_policy_cache_entry *it; + /* First search for policy in "used" list */ + list_for_each_entry(it, &cache->used, link) { + if (tx_policy_is_equal(wanted, &it->policy)) + return it - cache->cache; + } + /* Then - in "free list" */ + list_for_each_entry(it, &cache->free, link) { + if (tx_policy_is_equal(wanted, &it->policy)) + return it - cache->cache; + } + return -1; +} + +static inline void tx_policy_use(struct tx_policy_cache *cache, + struct tx_policy_cache_entry *entry) +{ + ++entry->policy.usage_count; + list_move(&entry->link, &cache->used); +} + +static inline int tx_policy_release(struct tx_policy_cache *cache, + struct tx_policy_cache_entry *entry) +{ + int ret = --entry->policy.usage_count; + if (!ret) + list_move(&entry->link, &cache->free); + return ret; +} + +void tx_policy_clean(struct cw1200_common *priv) +{ + int idx, locked; + struct tx_policy_cache *cache = &priv->tx_policy_cache; + struct tx_policy_cache_entry *entry; + + cw1200_tx_queues_lock(priv); + spin_lock_bh(&cache->lock); + locked = list_empty(&cache->free); + + for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) { + entry = &cache->cache[idx]; + /* Policy usage count should be 0 at this time as all queues + should be empty */ + if (WARN_ON(entry->policy.usage_count)) { + entry->policy.usage_count = 0; + list_move(&entry->link, &cache->free); + } + memset(&entry->policy, 0, sizeof(entry->policy)); + } + if (locked) + cw1200_tx_queues_unlock(priv); + + cw1200_tx_queues_unlock(priv); + spin_unlock_bh(&cache->lock); +} + +/* ******************************************************************** */ +/* External TX policy cache API */ + +void tx_policy_init(struct cw1200_common *priv) +{ + struct tx_policy_cache *cache = &priv->tx_policy_cache; + int i; + + memset(cache, 0, sizeof(*cache)); + + spin_lock_init(&cache->lock); + INIT_LIST_HEAD(&cache->used); + INIT_LIST_HEAD(&cache->free); + + for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) + list_add(&cache->cache[i].link, &cache->free); +} + +static int tx_policy_get(struct cw1200_common *priv, + struct ieee80211_tx_rate *rates, + size_t count, bool *renew) +{ + int idx; + struct tx_policy_cache *cache = &priv->tx_policy_cache; + struct tx_policy wanted; + + tx_policy_build(priv, &wanted, rates, count); + + spin_lock_bh(&cache->lock); + if (WARN_ON_ONCE(list_empty(&cache->free))) { + spin_unlock_bh(&cache->lock); + return CW1200_INVALID_RATE_ID; + } + idx = tx_policy_find(cache, 
&wanted); + if (idx >= 0) { + pr_debug("[TX policy] Used TX policy: %d\n", idx); + *renew = false; + } else { + struct tx_policy_cache_entry *entry; + *renew = true; + /* If policy is not found create a new one + * using the oldest entry in "free" list */ + entry = list_entry(cache->free.prev, + struct tx_policy_cache_entry, link); + entry->policy = wanted; + idx = entry - cache->cache; + pr_debug("[TX policy] New TX policy: %d\n", idx); + tx_policy_dump(&entry->policy); + } + tx_policy_use(cache, &cache->cache[idx]); + if (list_empty(&cache->free)) { + /* Lock TX queues. */ + cw1200_tx_queues_lock(priv); + } + spin_unlock_bh(&cache->lock); + return idx; +} + +static void tx_policy_put(struct cw1200_common *priv, int idx) +{ + int usage, locked; + struct tx_policy_cache *cache = &priv->tx_policy_cache; + + spin_lock_bh(&cache->lock); + locked = list_empty(&cache->free); + usage = tx_policy_release(cache, &cache->cache[idx]); + if (locked && !usage) { + /* Unlock TX queues. */ + cw1200_tx_queues_unlock(priv); + } + spin_unlock_bh(&cache->lock); +} + +static int tx_policy_upload(struct cw1200_common *priv) +{ + struct tx_policy_cache *cache = &priv->tx_policy_cache; + int i; + struct wsm_set_tx_rate_retry_policy arg = { + .num = 0, + }; + spin_lock_bh(&cache->lock); + + /* Upload only modified entries. */ + for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) { + struct tx_policy *src = &cache->cache[i].policy; + if (src->retry_count && !src->uploaded) { + struct wsm_tx_rate_retry_policy *dst = + &arg.tbl[arg.num]; + dst->index = i; + dst->short_retries = priv->short_frame_max_tx_count; + dst->long_retries = priv->long_frame_max_tx_count; + + dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED | + WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT; + memcpy(dst->rate_count_indices, src->tbl, + sizeof(dst->rate_count_indices)); + src->uploaded = 1; + ++arg.num; + } + } + spin_unlock_bh(&cache->lock); + cw1200_debug_tx_cache_miss(priv); + pr_debug("[TX policy] Upload %d policies\n", arg.num); + return wsm_set_tx_rate_retry_policy(priv, &arg); +} + +void tx_policy_upload_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, tx_policy_upload_work); + + pr_debug("[TX] TX policy upload.\n"); + tx_policy_upload(priv); + + wsm_unlock_tx(priv); + cw1200_tx_queues_unlock(priv); +} + +/* ******************************************************************** */ +/* cw1200 TX implementation */ + +struct cw1200_txinfo { + struct sk_buff *skb; + unsigned queue; + struct ieee80211_tx_info *tx_info; + const struct ieee80211_rate *rate; + struct ieee80211_hdr *hdr; + size_t hdrlen; + const u8 *da; + struct cw1200_sta_priv *sta_priv; + struct ieee80211_sta *sta; + struct cw1200_txpriv txpriv; +}; + +u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates) +{ + u32 ret = 0; + int i; + for (i = 0; i < 32; ++i) { + if (rates & BIT(i)) + ret |= BIT(priv->rates[i].hw_value); + } + return ret; +} + +static const struct ieee80211_rate * +cw1200_get_tx_rate(const struct cw1200_common *priv, + const struct ieee80211_tx_rate *rate) +{ + if (rate->idx < 0) + return NULL; + if (rate->flags & IEEE80211_TX_RC_MCS) + return &priv->mcs_rates[rate->idx]; + return &priv->hw->wiphy->bands[priv->channel->band]-> + bitrates[rate->idx]; +} + +static int +cw1200_tx_h_calc_link_ids(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (t->sta && t->sta_priv->link_id) + t->txpriv.raw_link_id = + t->txpriv.link_id = + t->sta_priv->link_id; + else if (priv->mode != 
NL80211_IFTYPE_AP) + t->txpriv.raw_link_id = + t->txpriv.link_id = 0; + else if (is_multicast_ether_addr(t->da)) { + if (priv->enable_beacon) { + t->txpriv.raw_link_id = 0; + t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM; + } else { + t->txpriv.raw_link_id = 0; + t->txpriv.link_id = 0; + } + } else { + t->txpriv.link_id = cw1200_find_link_id(priv, t->da); + if (!t->txpriv.link_id) + t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da); + if (!t->txpriv.link_id) { + wiphy_err(priv->hw->wiphy, + "No more link IDs available.\n"); + return -ENOENT; + } + t->txpriv.raw_link_id = t->txpriv.link_id; + } + if (t->txpriv.raw_link_id) + priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp = + jiffies; + if (t->sta && (t->sta->uapsd_queues & BIT(t->queue))) + t->txpriv.link_id = CW1200_LINK_ID_UAPSD; + return 0; +} + +static void +cw1200_tx_h_pm(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (ieee80211_is_auth(t->hdr->frame_control)) { + u32 mask = ~BIT(t->txpriv.raw_link_id); + spin_lock_bh(&priv->ps_state_lock); + priv->sta_asleep_mask &= mask; + priv->pspoll_mask &= mask; + spin_unlock_bh(&priv->ps_state_lock); + } +} + +static void +cw1200_tx_h_calc_tid(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (ieee80211_is_data_qos(t->hdr->frame_control)) { + u8 *qos = ieee80211_get_qos_ctl(t->hdr); + t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK; + } else if (ieee80211_is_data(t->hdr->frame_control)) { + t->txpriv.tid = 0; + } +} + +static int +cw1200_tx_h_crypt(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (!t->tx_info->control.hw_key || + !ieee80211_has_protected(t->hdr->frame_control)) + return 0; + + t->hdrlen += t->tx_info->control.hw_key->iv_len; + skb_put(t->skb, t->tx_info->control.hw_key->icv_len); + + if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) + skb_put(t->skb, 8); /* MIC space */ + + return 0; +} + +static int +cw1200_tx_h_align(struct cw1200_common *priv, + struct cw1200_txinfo *t, + u8 *flags) +{ + size_t offset = (size_t)t->skb->data & 3; + + if (!offset) + return 0; + + if (offset & 1) { + wiphy_err(priv->hw->wiphy, + "Bug: attempt to transmit a frame with wrong alignment: %zu\n", + offset); + return -EINVAL; + } + + if (skb_headroom(t->skb) < offset) { + wiphy_err(priv->hw->wiphy, + "Bug: no space allocated for DMA alignment. headroom: %d\n", + skb_headroom(t->skb)); + return -ENOMEM; + } + skb_push(t->skb, offset); + t->hdrlen += offset; + t->txpriv.offset += offset; + *flags |= WSM_TX_2BYTES_SHIFT; + cw1200_debug_tx_align(priv); + return 0; +} + +static int +cw1200_tx_h_action(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *)t->hdr; + if (ieee80211_is_action(t->hdr->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_BACK) + return 1; + else + return 0; +} + +/* Add WSM header */ +static struct wsm_tx * +cw1200_tx_h_wsm(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + struct wsm_tx *wsm; + + if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) { + wiphy_err(priv->hw->wiphy, + "Bug: no space allocated for WSM header. 
headroom: %d\n", + skb_headroom(t->skb)); + return NULL; + } + + wsm = (struct wsm_tx *)skb_push(t->skb, sizeof(struct wsm_tx)); + t->txpriv.offset += sizeof(struct wsm_tx); + memset(wsm, 0, sizeof(*wsm)); + wsm->hdr.len = __cpu_to_le16(t->skb->len); + wsm->hdr.id = __cpu_to_le16(0x0004); + wsm->queue_id = wsm_queue_id_to_wsm(t->queue); + return wsm; +} + +/* BT Coex specific handling */ +static void +cw1200_tx_h_bt(struct cw1200_common *priv, + struct cw1200_txinfo *t, + struct wsm_tx *wsm) +{ + u8 priority = 0; + + if (!priv->bt_present) + return; + + if (ieee80211_is_nullfunc(t->hdr->frame_control)) { + priority = WSM_EPTA_PRIORITY_MGT; + } else if (ieee80211_is_data(t->hdr->frame_control)) { + /* Skip LLC SNAP header (+6) */ + u8 *payload = &t->skb->data[t->hdrlen]; + u16 *ethertype = (u16 *)&payload[6]; + if (*ethertype == __be16_to_cpu(ETH_P_PAE)) + priority = WSM_EPTA_PRIORITY_EAPOL; + } else if (ieee80211_is_assoc_req(t->hdr->frame_control) || + ieee80211_is_reassoc_req(t->hdr->frame_control)) { + struct ieee80211_mgmt *mgt_frame = + (struct ieee80211_mgmt *)t->hdr; + + if (mgt_frame->u.assoc_req.listen_interval < + priv->listen_interval) { + pr_debug("Modified Listen Interval to %d from %d\n", + priv->listen_interval, + mgt_frame->u.assoc_req.listen_interval); + /* Replace listen interval derieved from + * the one read from SDD */ + mgt_frame->u.assoc_req.listen_interval = + priv->listen_interval; + } + } + + if (!priority) { + if (ieee80211_is_action(t->hdr->frame_control)) + priority = WSM_EPTA_PRIORITY_ACTION; + else if (ieee80211_is_mgmt(t->hdr->frame_control)) + priority = WSM_EPTA_PRIORITY_MGT; + else if ((wsm->queue_id == WSM_QUEUE_VOICE)) + priority = WSM_EPTA_PRIORITY_VOICE; + else if ((wsm->queue_id == WSM_QUEUE_VIDEO)) + priority = WSM_EPTA_PRIORITY_VIDEO; + else + priority = WSM_EPTA_PRIORITY_DATA; + } + + pr_debug("[TX] EPTA priority %d.\n", priority); + + wsm->flags |= priority << 1; +} + +static int +cw1200_tx_h_rate_policy(struct cw1200_common *priv, + struct cw1200_txinfo *t, + struct wsm_tx *wsm) +{ + bool tx_policy_renew = false; + + t->txpriv.rate_id = tx_policy_get(priv, + t->tx_info->control.rates, IEEE80211_TX_MAX_RATES, + &tx_policy_renew); + if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID) + return -EFAULT; + + wsm->flags |= t->txpriv.rate_id << 4; + + t->rate = cw1200_get_tx_rate(priv, + &t->tx_info->control.rates[0]), + wsm->max_tx_rate = t->rate->hw_value; + if (t->rate->flags & IEEE80211_TX_RC_MCS) { + if (cw1200_ht_greenfield(&priv->ht_info)) + wsm->ht_tx_parameters |= + __cpu_to_le32(WSM_HT_TX_GREENFIELD); + else + wsm->ht_tx_parameters |= + __cpu_to_le32(WSM_HT_TX_MIXED); + } + + if (tx_policy_renew) { + pr_debug("[TX] TX policy renew.\n"); + /* It's not so optimal to stop TX queues every now and then. + * Better to reimplement task scheduling with + * a counter. TODO. 
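Two of the handlers above pack their results into the same WSM flags field: the EPTA priority at bit 1 and the rate-policy index at bit 4, alongside the alignment-shift flag set in cw1200_tx_h_align(). A compact sketch of just those two shifts (build_tx_flags() is an invented helper, not part of the driver):

#include <stdio.h>
#include <stdint.h>

/* Sketch only: the shifts match cw1200_tx_h_bt() (priority << 1) and
 * cw1200_tx_h_rate_policy() (rate policy id << 4) above. */
static uint8_t build_tx_flags(unsigned int epta_priority,
                              unsigned int rate_policy_id)
{
        uint8_t flags = 0;

        flags |= (uint8_t)(epta_priority << 1);
        flags |= (uint8_t)(rate_policy_id << 4);
        return flags;
}

int main(void)
{
        printf("0x%02x\n", build_tx_flags(4, 3)); /* -> 0x38 */
        return 0;
}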
*/ + wsm_lock_tx_async(priv); + cw1200_tx_queues_lock(priv); + if (queue_work(priv->workqueue, + &priv->tx_policy_upload_work) <= 0) { + cw1200_tx_queues_unlock(priv); + wsm_unlock_tx(priv); + } + } + return 0; +} + +static bool +cw1200_tx_h_pm_state(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + int was_buffered = 1; + + if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM && + !priv->buffered_multicasts) { + priv->buffered_multicasts = true; + if (priv->sta_asleep_mask) + queue_work(priv->workqueue, + &priv->multicast_start_work); + } + + if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID) + was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++; + + return !was_buffered; +} + +/* ******************************************************************** */ + +void cw1200_tx(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct cw1200_common *priv = dev->priv; + struct cw1200_txinfo t = { + .skb = skb, + .queue = skb_get_queue_mapping(skb), + .tx_info = IEEE80211_SKB_CB(skb), + .hdr = (struct ieee80211_hdr *)skb->data, + .txpriv.tid = CW1200_MAX_TID, + .txpriv.rate_id = CW1200_INVALID_RATE_ID, + }; + struct ieee80211_sta *sta; + struct wsm_tx *wsm; + bool tid_update = 0; + u8 flags = 0; + int ret; + + if (priv->bh_error) + goto drop; + + t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control); + t.da = ieee80211_get_DA(t.hdr); + if (control) { + t.sta = control->sta; + t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv; + } + + if (WARN_ON(t.queue >= 4)) + goto drop; + + ret = cw1200_tx_h_calc_link_ids(priv, &t); + if (ret) + goto drop; + + pr_debug("[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n", + skb->len, t.queue, t.txpriv.link_id, + t.txpriv.raw_link_id); + + cw1200_tx_h_pm(priv, &t); + cw1200_tx_h_calc_tid(priv, &t); + ret = cw1200_tx_h_crypt(priv, &t); + if (ret) + goto drop; + ret = cw1200_tx_h_align(priv, &t, &flags); + if (ret) + goto drop; + ret = cw1200_tx_h_action(priv, &t); + if (ret) + goto drop; + wsm = cw1200_tx_h_wsm(priv, &t); + if (!wsm) { + ret = -ENOMEM; + goto drop; + } + wsm->flags |= flags; + cw1200_tx_h_bt(priv, &t, wsm); + ret = cw1200_tx_h_rate_policy(priv, &t, wsm); + if (ret) + goto drop; + + rcu_read_lock(); + sta = rcu_dereference(t.sta); + + spin_lock_bh(&priv->ps_state_lock); + { + tid_update = cw1200_tx_h_pm_state(priv, &t); + BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue], + t.skb, &t.txpriv)); + } + spin_unlock_bh(&priv->ps_state_lock); + + if (tid_update && sta) + ieee80211_sta_set_buffered(sta, t.txpriv.tid, true); + + rcu_read_unlock(); + + cw1200_bh_wakeup(priv); + + return; + +drop: + cw1200_skb_dtor(priv, skb, &t.txpriv); + return; +} + +/* ******************************************************************** */ + +static int cw1200_handle_action_rx(struct cw1200_common *priv, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + /* Filter block ACK negotiation: fully controlled by firmware */ + if (mgmt->u.action.category == WLAN_CATEGORY_BACK) + return 1; + + return 0; +} + +static int cw1200_handle_pspoll(struct cw1200_common *priv, + struct sk_buff *skb) +{ + struct ieee80211_sta *sta; + struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data; + int link_id = 0; + u32 pspoll_mask = 0; + int drop = 1; + int i; + + if (priv->join_status != CW1200_JOIN_STATUS_AP) + goto done; + if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN)) + goto done; + + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, 
pspoll->ta); + if (sta) { + struct cw1200_sta_priv *sta_priv; + sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv; + link_id = sta_priv->link_id; + pspoll_mask = BIT(sta_priv->link_id); + } + rcu_read_unlock(); + if (!link_id) + goto done; + + priv->pspoll_mask |= pspoll_mask; + drop = 0; + + /* Do not report pspols if data for given link id is + * queued already. */ + for (i = 0; i < 4; ++i) { + if (cw1200_queue_get_num_queued(&priv->tx_queue[i], + pspoll_mask)) { + cw1200_bh_wakeup(priv); + drop = 1; + break; + } + } + pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd"); +done: + return drop; +} + +/* ******************************************************************** */ + +void cw1200_tx_confirm_cb(struct cw1200_common *priv, + int link_id, + struct wsm_tx_confirm *arg) +{ + u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id); + struct cw1200_queue *queue = &priv->tx_queue[queue_id]; + struct sk_buff *skb; + const struct cw1200_txpriv *txpriv; + + pr_debug("[TX] TX confirm: %d, %d.\n", + arg->status, arg->ack_failures); + + if (cw1200_itp_tx_running(priv)) + return; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* STA is stopped. */ + return; + } + + if (WARN_ON(queue_id >= 4)) + return; + + if (arg->status) + pr_debug("TX failed: %d.\n", arg->status); + + if ((arg->status == WSM_REQUEUE) && + (arg->flags & WSM_TX_STATUS_REQUEUE)) { + /* "Requeue" means "implicit suspend" */ + struct wsm_suspend_resume suspend = { + .link_id = link_id, + .stop = 1, + .multicast = !link_id, + }; + cw1200_suspend_resume(priv, &suspend); + wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n", + link_id, + cw1200_queue_get_generation(arg->packet_id) + 1, + priv->sta_asleep_mask); + cw1200_queue_requeue(queue, arg->packet_id); + spin_lock_bh(&priv->ps_state_lock); + if (!link_id) { + priv->buffered_multicasts = true; + if (priv->sta_asleep_mask) { + queue_work(priv->workqueue, + &priv->multicast_start_work); + } + } + spin_unlock_bh(&priv->ps_state_lock); + } else if (!cw1200_queue_get_skb(queue, arg->packet_id, + &skb, &txpriv)) { + struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb); + int tx_count = arg->ack_failures; + u8 ht_flags = 0; + int i; + + if (cw1200_ht_greenfield(&priv->ht_info)) + ht_flags |= IEEE80211_TX_RC_GREEN_FIELD; + + spin_lock(&priv->bss_loss_lock); + if (priv->bss_loss_state && + arg->packet_id == priv->bss_loss_confirm_id) { + if (arg->status) { + /* Recovery failed */ + __cw1200_cqm_bssloss_sm(priv, 0, 0, 1); + } else { + /* Recovery succeeded */ + __cw1200_cqm_bssloss_sm(priv, 0, 1, 0); + } + } + spin_unlock(&priv->bss_loss_lock); + + if (!arg->status) { + tx->flags |= IEEE80211_TX_STAT_ACK; + ++tx_count; + cw1200_debug_txed(priv); + if (arg->flags & WSM_TX_STATUS_AGGREGATION) { + /* Do not report aggregation to mac80211: + * it confuses minstrel a lot. 
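A little further on, the confirmed transmit count derived from ack_failures is spread across the reported rate entries, and any entries left over are invalidated. A standalone sketch of that distribution loop (struct rate_status, MAX_RATES and the sample table are invented; the HT flag handling is omitted):

#include <stdio.h>

#define MAX_RATES 4

struct rate_status { int idx; int count; };

/* Sketch only: mirrors the loop in cw1200_tx_confirm_cb() that caps the
 * entry which absorbed the final attempt and clears the rest. */
static void distribute_tx_count(struct rate_status *r, int tx_count)
{
        int i;

        for (i = 0; i < MAX_RATES; ++i) {
                if (r[i].count >= tx_count) {
                        r[i].count = tx_count;
                        break;
                }
                tx_count -= r[i].count;
        }
        for (++i; i < MAX_RATES; ++i) {
                r[i].count = 0;
                r[i].idx = -1;
        }
}

int main(void)
{
        struct rate_status r[MAX_RATES] = { {7, 3}, {4, 3}, {0, 3}, {0, 3} };

        distribute_tx_count(r, 5); /* 3 tries at idx 7, then 2 at idx 4 */
        for (int i = 0; i < MAX_RATES; ++i)
                printf("idx=%d count=%d\n", r[i].idx, r[i].count);
        return 0;
}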
*/ + /* tx->flags |= IEEE80211_TX_STAT_AMPDU; */ + cw1200_debug_txed_agg(priv); + } + } else { + if (tx_count) + ++tx_count; + } + + for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) { + if (tx->status.rates[i].count >= tx_count) { + tx->status.rates[i].count = tx_count; + break; + } + tx_count -= tx->status.rates[i].count; + if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS) + tx->status.rates[i].flags |= ht_flags; + } + + for (++i; i < IEEE80211_TX_MAX_RATES; ++i) { + tx->status.rates[i].count = 0; + tx->status.rates[i].idx = -1; + } + + /* Pull off any crypto trailers that we added on */ + if (tx->control.hw_key) { + skb_trim(skb, skb->len - tx->control.hw_key->icv_len); + if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) + skb_trim(skb, skb->len - 8); /* MIC space */ + } + cw1200_queue_remove(queue, arg->packet_id); + } + /* XXX TODO: Only wake if there are pending transmits.. */ + cw1200_bh_wakeup(priv); +} + +static void cw1200_notify_buffered_tx(struct cw1200_common *priv, + struct sk_buff *skb, int link_id, int tid) +{ + struct ieee80211_sta *sta; + struct ieee80211_hdr *hdr; + u8 *buffered; + u8 still_buffered = 0; + + if (link_id && tid < CW1200_MAX_TID) { + buffered = priv->link_id_db + [link_id - 1].buffered; + + spin_lock_bh(&priv->ps_state_lock); + if (!WARN_ON(!buffered[tid])) + still_buffered = --buffered[tid]; + spin_unlock_bh(&priv->ps_state_lock); + + if (!still_buffered && tid < CW1200_MAX_TID) { + hdr = (struct ieee80211_hdr *)skb->data; + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, hdr->addr1); + if (sta) + ieee80211_sta_set_buffered(sta, tid, false); + rcu_read_unlock(); + } + } +} + +void cw1200_skb_dtor(struct cw1200_common *priv, + struct sk_buff *skb, + const struct cw1200_txpriv *txpriv) +{ + skb_pull(skb, txpriv->offset); + if (txpriv->rate_id != CW1200_INVALID_RATE_ID) { + cw1200_notify_buffered_tx(priv, skb, + txpriv->raw_link_id, txpriv->tid); + tx_policy_put(priv, txpriv->rate_id); + } + if (!cw1200_is_itp(priv)) + ieee80211_tx_status(priv->hw, skb); +} + +void cw1200_rx_cb(struct cw1200_common *priv, + struct wsm_rx *arg, + int link_id, + struct sk_buff **skb_p) +{ + struct sk_buff *skb = *skb_p; + struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct cw1200_link_entry *entry = NULL; + unsigned long grace_period; + + bool early_data = false; + bool p2p = priv->vif && priv->vif->p2p; + size_t hdrlen; + hdr->flag = 0; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* STA is stopped. */ + goto drop; + } + + if (link_id && link_id <= CW1200_MAX_STA_IN_AP_MODE) { + entry = &priv->link_id_db[link_id - 1]; + if (entry->status == CW1200_LINK_SOFT && + ieee80211_is_data(frame->frame_control)) + early_data = true; + entry->timestamp = jiffies; + } else if (p2p && + ieee80211_is_action(frame->frame_control) && + (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) { + pr_debug("[RX] Going to MAP&RESET link ID\n"); + WARN_ON(work_pending(&priv->linkid_reset_work)); + memcpy(&priv->action_frame_sa[0], + ieee80211_get_SA(frame), ETH_ALEN); + priv->action_linkid = 0; + schedule_work(&priv->linkid_reset_work); + } + + if (link_id && p2p && + ieee80211_is_action(frame->frame_control) && + (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) { + /* Link ID already exists for the ACTION frame. 
+ * Reset and Remap */ + WARN_ON(work_pending(&priv->linkid_reset_work)); + memcpy(&priv->action_frame_sa[0], + ieee80211_get_SA(frame), ETH_ALEN); + priv->action_linkid = link_id; + schedule_work(&priv->linkid_reset_work); + } + if (arg->status) { + if (arg->status == WSM_STATUS_MICFAILURE) { + pr_debug("[RX] MIC failure.\n"); + hdr->flag |= RX_FLAG_MMIC_ERROR; + } else if (arg->status == WSM_STATUS_NO_KEY_FOUND) { + pr_debug("[RX] No key found.\n"); + goto drop; + } else { + pr_debug("[RX] Receive failure: %d.\n", + arg->status); + goto drop; + } + } + + if (skb->len < sizeof(struct ieee80211_pspoll)) { + wiphy_warn(priv->hw->wiphy, "Mailformed SDU rx'ed. Size is lesser than IEEE header.\n"); + goto drop; + } + + if (ieee80211_is_pspoll(frame->frame_control)) + if (cw1200_handle_pspoll(priv, skb)) + goto drop; + + hdr->mactime = 0; /* Not supported by WSM */ + hdr->band = ((arg->channel_number & 0xff00) || + (arg->channel_number > 14)) ? + IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; + hdr->freq = ieee80211_channel_to_frequency( + arg->channel_number, + hdr->band); + + if (arg->rx_rate >= 14) { + hdr->flag |= RX_FLAG_HT; + hdr->rate_idx = arg->rx_rate - 14; + } else if (arg->rx_rate >= 4) { + hdr->rate_idx = arg->rx_rate - 2; + } else { + hdr->rate_idx = arg->rx_rate; + } + + hdr->signal = (s8)arg->rcpi_rssi; + hdr->antenna = 0; + + hdrlen = ieee80211_hdrlen(frame->frame_control); + + if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) { + size_t iv_len = 0, icv_len = 0; + + hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED; + + /* Oops... There is no fast way to ask mac80211 about + * IV/ICV lengths. Even defineas are not exposed.*/ + switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) { + case WSM_RX_STATUS_WEP: + iv_len = 4 /* WEP_IV_LEN */; + icv_len = 4 /* WEP_ICV_LEN */; + break; + case WSM_RX_STATUS_TKIP: + iv_len = 8 /* TKIP_IV_LEN */; + icv_len = 4 /* TKIP_ICV_LEN */ + + 8 /*MICHAEL_MIC_LEN*/; + hdr->flag |= RX_FLAG_MMIC_STRIPPED; + break; + case WSM_RX_STATUS_AES: + iv_len = 8 /* CCMP_HDR_LEN */; + icv_len = 8 /* CCMP_MIC_LEN */; + break; + case WSM_RX_STATUS_WAPI: + iv_len = 18 /* WAPI_HDR_LEN */; + icv_len = 16 /* WAPI_MIC_LEN */; + break; + default: + pr_warn("Unknown encryption type %d\n", + WSM_RX_STATUS_ENCRYPTION(arg->flags)); + goto drop; + } + + /* Firmware strips ICV in case of MIC failure. */ + if (arg->status == WSM_STATUS_MICFAILURE) + icv_len = 0; + + if (skb->len < hdrlen + iv_len + icv_len) { + wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. 
Size is lesser than crypto headers.\n"); + goto drop; + } + + /* Remove IV, ICV and MIC */ + skb_trim(skb, skb->len - icv_len); + memmove(skb->data + iv_len, skb->data, hdrlen); + skb_pull(skb, iv_len); + } + + /* Remove TSF from the end of frame */ + if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) { + memcpy(&hdr->mactime, skb->data + skb->len - 8, 8); + hdr->mactime = le64_to_cpu(hdr->mactime); + if (skb->len >= 8) + skb_trim(skb, skb->len - 8); + } + + cw1200_debug_rxed(priv); + if (arg->flags & WSM_RX_STATUS_AGGREGATE) + cw1200_debug_rxed_agg(priv); + + if (ieee80211_is_action(frame->frame_control) && + (arg->flags & WSM_RX_STATUS_ADDRESS1)) { + if (cw1200_handle_action_rx(priv, skb)) + return; + } else if (ieee80211_is_beacon(frame->frame_control) && + !arg->status && + !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid, + ETH_ALEN)) { + const u8 *tim_ie; + u8 *ies = ((struct ieee80211_mgmt *) + (skb->data))->u.beacon.variable; + size_t ies_len = skb->len - (ies - (u8 *)(skb->data)); + + tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len); + if (tim_ie) { + struct ieee80211_tim_ie *tim = + (struct ieee80211_tim_ie *)&tim_ie[2]; + + if (priv->join_dtim_period != tim->dtim_period) { + priv->join_dtim_period = tim->dtim_period; + queue_work(priv->workqueue, + &priv->set_beacon_wakeup_period_work); + } + } + + /* Disable beacon filter once we're associated... */ + if (priv->disable_beacon_filter && + (priv->vif->bss_conf.assoc || + priv->vif->bss_conf.ibss_joined)) { + priv->disable_beacon_filter = false; + queue_work(priv->workqueue, + &priv->update_filtering_work); + } + } + + /* Stay awake after frame is received to give + * userspace chance to react and acquire appropriate + * wakelock. */ + if (ieee80211_is_auth(frame->frame_control)) + grace_period = 5 * HZ; + else if (ieee80211_is_deauth(frame->frame_control)) + grace_period = 5 * HZ; + else + grace_period = 1 * HZ; + cw1200_pm_stay_awake(&priv->pm_state, grace_period); + + if (cw1200_itp_rxed(priv, skb)) { + consume_skb(skb); + } else if (early_data) { + spin_lock_bh(&priv->ps_state_lock); + /* Double-check status with lock held */ + if (entry->status == CW1200_LINK_SOFT) + skb_queue_tail(&entry->rx_queue, skb); + else + ieee80211_rx_irqsafe(priv->hw, skb); + spin_unlock_bh(&priv->ps_state_lock); + } else { + ieee80211_rx_irqsafe(priv->hw, skb); + } + *skb_p = NULL; + + return; + +drop: + /* TODO: update failure counters */ + return; +} + +/* ******************************************************************** */ +/* Security */ + +int cw1200_alloc_key(struct cw1200_common *priv) +{ + int idx; + + idx = ffs(~priv->key_map) - 1; + if (idx < 0 || idx > WSM_KEY_MAX_INDEX) + return -1; + + priv->key_map |= BIT(idx); + priv->keys[idx].index = idx; + return idx; +} + +void cw1200_free_key(struct cw1200_common *priv, int idx) +{ + BUG_ON(!(priv->key_map & BIT(idx))); + memset(&priv->keys[idx], 0, sizeof(priv->keys[idx])); + priv->key_map &= ~BIT(idx); +} + +void cw1200_free_keys(struct cw1200_common *priv) +{ + memset(&priv->keys, 0, sizeof(priv->keys)); + priv->key_map = 0; +} + +int cw1200_upload_keys(struct cw1200_common *priv) +{ + int idx, ret = 0; + for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx) + if (priv->key_map & BIT(idx)) { + ret = wsm_add_key(priv, &priv->keys[idx]); + if (ret < 0) + break; + } + return ret; +} + +/* Workaround for WFD test case 6.1.10 */ +void cw1200_link_id_reset(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, linkid_reset_work); + int 
temp_linkid; + + if (!priv->action_linkid) { + /* In GO mode we can receive ACTION frames without a linkID */ + temp_linkid = cw1200_alloc_link_id(priv, + &priv->action_frame_sa[0]); + WARN_ON(!temp_linkid); + if (temp_linkid) { + /* Make sure we execute the WQ */ + flush_workqueue(priv->workqueue); + /* Release the link ID */ + spin_lock_bh(&priv->ps_state_lock); + priv->link_id_db[temp_linkid - 1].prev_status = + priv->link_id_db[temp_linkid - 1].status; + priv->link_id_db[temp_linkid - 1].status = + CW1200_LINK_RESET; + spin_unlock_bh(&priv->ps_state_lock); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, + &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + } + } else { + spin_lock_bh(&priv->ps_state_lock); + priv->link_id_db[priv->action_linkid - 1].prev_status = + priv->link_id_db[priv->action_linkid - 1].status; + priv->link_id_db[priv->action_linkid - 1].status = + CW1200_LINK_RESET_REMAP; + spin_unlock_bh(&priv->ps_state_lock); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + flush_workqueue(priv->workqueue); + } +} + +int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac) +{ + int i, ret = 0; + spin_lock_bh(&priv->ps_state_lock); + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) && + priv->link_id_db[i].status) { + priv->link_id_db[i].timestamp = jiffies; + ret = i + 1; + break; + } + } + spin_unlock_bh(&priv->ps_state_lock); + return ret; +} + +int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac) +{ + int i, ret = 0; + unsigned long max_inactivity = 0; + unsigned long now = jiffies; + + spin_lock_bh(&priv->ps_state_lock); + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + if (!priv->link_id_db[i].status) { + ret = i + 1; + break; + } else if (priv->link_id_db[i].status != CW1200_LINK_HARD && + !priv->tx_queue_stats.link_map_cache[i + 1]) { + unsigned long inactivity = + now - priv->link_id_db[i].timestamp; + if (inactivity < max_inactivity) + continue; + max_inactivity = inactivity; + ret = i + 1; + } + } + if (ret) { + struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1]; + pr_debug("[AP] STA added, link_id: %d\n", ret); + entry->status = CW1200_LINK_RESERVE; + memcpy(&entry->mac, mac, ETH_ALEN); + memset(&entry->buffered, 0, CW1200_MAX_TID); + skb_queue_head_init(&entry->rx_queue); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + } else { + wiphy_info(priv->hw->wiphy, + "[AP] Early: no more link IDs available.\n"); + } + + spin_unlock_bh(&priv->ps_state_lock); + return ret; +} + +void cw1200_link_id_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, link_id_work); + wsm_flush_tx(priv); + cw1200_link_id_gc_work(&priv->link_id_gc_work.work); + wsm_unlock_tx(priv); +} + +void cw1200_link_id_gc_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, link_id_gc_work.work); + struct wsm_reset reset = { + .reset_statistics = false, + }; + struct wsm_map_link map_link = { + .link_id = 0, + }; + unsigned long now = jiffies; + unsigned long next_gc = -1; + long ttl; + bool need_reset; + u32 mask; + int i; + + if (priv->join_status != CW1200_JOIN_STATUS_AP) + return; + + wsm_lock_tx(priv); + spin_lock_bh(&priv->ps_state_lock); + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + need_reset = false; + mask = BIT(i + 1); + if 
(priv->link_id_db[i].status == CW1200_LINK_RESERVE || + (priv->link_id_db[i].status == CW1200_LINK_HARD && + !(priv->link_id_map & mask))) { + if (priv->link_id_map & mask) { + priv->sta_asleep_mask &= ~mask; + priv->pspoll_mask &= ~mask; + need_reset = true; + } + priv->link_id_map |= mask; + if (priv->link_id_db[i].status != CW1200_LINK_HARD) + priv->link_id_db[i].status = CW1200_LINK_SOFT; + memcpy(map_link.mac_addr, priv->link_id_db[i].mac, + ETH_ALEN); + spin_unlock_bh(&priv->ps_state_lock); + if (need_reset) { + reset.link_id = i + 1; + wsm_reset(priv, &reset); + } + map_link.link_id = i + 1; + wsm_map_link(priv, &map_link); + next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT); + spin_lock_bh(&priv->ps_state_lock); + } else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) { + ttl = priv->link_id_db[i].timestamp - now + + CW1200_LINK_ID_GC_TIMEOUT; + if (ttl <= 0) { + need_reset = true; + priv->link_id_db[i].status = CW1200_LINK_OFF; + priv->link_id_map &= ~mask; + priv->sta_asleep_mask &= ~mask; + priv->pspoll_mask &= ~mask; + memset(map_link.mac_addr, 0, ETH_ALEN); + spin_unlock_bh(&priv->ps_state_lock); + reset.link_id = i + 1; + wsm_reset(priv, &reset); + spin_lock_bh(&priv->ps_state_lock); + } else { + next_gc = min_t(unsigned long, next_gc, ttl); + } + } else if (priv->link_id_db[i].status == CW1200_LINK_RESET || + priv->link_id_db[i].status == + CW1200_LINK_RESET_REMAP) { + int status = priv->link_id_db[i].status; + priv->link_id_db[i].status = + priv->link_id_db[i].prev_status; + priv->link_id_db[i].timestamp = now; + reset.link_id = i + 1; + spin_unlock_bh(&priv->ps_state_lock); + wsm_reset(priv, &reset); + if (status == CW1200_LINK_RESET_REMAP) { + memcpy(map_link.mac_addr, + priv->link_id_db[i].mac, + ETH_ALEN); + map_link.link_id = i + 1; + wsm_map_link(priv, &map_link); + next_gc = min(next_gc, + CW1200_LINK_ID_GC_TIMEOUT); + } + spin_lock_bh(&priv->ps_state_lock); + } + if (need_reset) { + skb_queue_purge(&priv->link_id_db[i].rx_queue); + pr_debug("[AP] STA removed, link_id: %d\n", + reset.link_id); + } + } + spin_unlock_bh(&priv->ps_state_lock); + if (next_gc != -1) + queue_delayed_work(priv->workqueue, + &priv->link_id_gc_work, next_gc); + wsm_unlock_tx(priv); +} diff --git a/drivers/net/wireless/cw1200/txrx.h b/drivers/net/wireless/cw1200/txrx.h new file mode 100644 index 000000000000..492a4e14213b --- /dev/null +++ b/drivers/net/wireless/cw1200/txrx.h @@ -0,0 +1,106 @@ +/* + * Datapath interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
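As an aside, the expiry test in cw1200_link_id_gc_work() above relies on signed jiffies arithmetic. The following standalone sketch (32-bit tick counters and a made-up GC_TIMEOUT standing in for CW1200_LINK_ID_GC_TIMEOUT) shows why the subtraction stays correct even when the tick counter wraps.

#include <stdint.h>
#include <stdio.h>

#define GC_TIMEOUT 1000u   // stand-in for CW1200_LINK_ID_GC_TIMEOUT (10 * HZ)

// Remaining lifetime of a link-id entry: last-activity timestamp minus the
// current tick plus the timeout, computed unsigned and interpreted signed.
static int32_t remaining(uint32_t last_seen, uint32_t now)
{
    return (int32_t)(last_seen - now + GC_TIMEOUT);
}

int main(void)
{
    uint32_t now = 5000;

    printf("fresh entry: ttl=%d\n", (int)remaining(4500, now));         // 500 left
    printf("stale entry: ttl=%d\n", (int)remaining(3000, now));         // expired
    printf("across wrap: ttl=%d\n", (int)remaining(0xFFFFFF00u, 200));  // still valid
    return 0;
}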
+ */ + +#ifndef CW1200_TXRX_H +#define CW1200_TXRX_H + +#include + +/* extern */ struct ieee80211_hw; +/* extern */ struct sk_buff; +/* extern */ struct wsm_tx; +/* extern */ struct wsm_rx; +/* extern */ struct wsm_tx_confirm; +/* extern */ struct cw1200_txpriv; + +struct tx_policy { + union { + __le32 tbl[3]; + u8 raw[12]; + }; + u8 defined; + u8 usage_count; + u8 retry_count; + u8 uploaded; +}; + +struct tx_policy_cache_entry { + struct tx_policy policy; + struct list_head link; +}; + +#define TX_POLICY_CACHE_SIZE (8) +struct tx_policy_cache { + struct tx_policy_cache_entry cache[TX_POLICY_CACHE_SIZE]; + struct list_head used; + struct list_head free; + spinlock_t lock; /* Protect policy cache */ +}; + +/* ******************************************************************** */ +/* TX policy cache */ +/* Intention of TX policy cache is an overcomplicated WSM API. + * Device does not accept per-PDU tx retry sequence. + * It uses "tx retry policy id" instead, so driver code has to sync + * linux tx retry sequences with a retry policy table in the device. + */ +void tx_policy_init(struct cw1200_common *priv); +void tx_policy_upload_work(struct work_struct *work); +void tx_policy_clean(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* TX implementation */ + +u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, + u32 rates); +void cw1200_tx(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb); +void cw1200_skb_dtor(struct cw1200_common *priv, + struct sk_buff *skb, + const struct cw1200_txpriv *txpriv); + +/* ******************************************************************** */ +/* WSM callbacks */ + +void cw1200_tx_confirm_cb(struct cw1200_common *priv, + int link_id, + struct wsm_tx_confirm *arg); +void cw1200_rx_cb(struct cw1200_common *priv, + struct wsm_rx *arg, + int link_id, + struct sk_buff **skb_p); + +/* ******************************************************************** */ +/* Timeout */ + +void cw1200_tx_timeout(struct work_struct *work); + +/* ******************************************************************** */ +/* Security */ +int cw1200_alloc_key(struct cw1200_common *priv); +void cw1200_free_key(struct cw1200_common *priv, int idx); +void cw1200_free_keys(struct cw1200_common *priv); +int cw1200_upload_keys(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* Workaround for WFD test case 6.1.10 */ +void cw1200_link_id_reset(struct work_struct *work); + +#define CW1200_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ)) + +int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac); +int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac); +void cw1200_link_id_work(struct work_struct *work); +void cw1200_link_id_gc_work(struct work_struct *work); + + +#endif /* CW1200_TXRX_H */ diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/cw1200/wsm.c new file mode 100644 index 000000000000..4db6cc16879c --- /dev/null +++ b/drivers/net/wireless/cw1200/wsm.c @@ -0,0 +1,1884 @@ +/* + * WSM host interface (HI) implementation for + * ST-Ericsson CW1200 mac80211 drivers. + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
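The WSM_GET8/16/32 and WSM_PUT8/16/32 helpers defined just below marshal every field through a bounds check against the buffer end. A simplified standalone model (toy struct buf and a get16() function in place of the macros; the real helpers additionally convert from little-endian wire order) looks like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct buf { uint8_t *data, *end; };

// Bounds-checked read of a 16-bit field: a short or corrupt frame makes the
// caller bail out (the "underflow" label in the real macros) instead of
// reading past the end of the buffer.
static int get16(struct buf *b, uint16_t *out)
{
    if (b->data + sizeof(*out) > b->end)
        return -1;
    memcpy(out, b->data, sizeof(*out));
    b->data += sizeof(*out);
    return 0;
}

int main(void)
{
    uint8_t raw[3] = { 0x34, 0x12, 0xff };
    struct buf b = { raw, raw + sizeof(raw) };
    uint16_t v = 0;

    printf("first read:  %s (0x%04x)\n", get16(&b, &v) ? "fail" : "ok", (unsigned)v);
    printf("second read: %s\n", get16(&b, &v) ? "fail (only 1 byte left)" : "ok");
    return 0;
}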
+ */ + +#include +#include +#include +#include +#include +#include + +#include "cw1200.h" +#include "wsm.h" +#include "bh.h" +#include "sta.h" +#include "debug.h" +#include "itp.h" + +#define WSM_CMD_TIMEOUT (2 * HZ) /* With respect to interrupt loss */ +#define WSM_CMD_START_TIMEOUT (7 * HZ) +#define WSM_CMD_RESET_TIMEOUT (3 * HZ) /* 2 sec. timeout was observed. */ +#define WSM_CMD_MAX_TIMEOUT (3 * HZ) + +#define WSM_SKIP(buf, size) \ + do { \ + if ((buf)->data + size > (buf)->end) \ + goto underflow; \ + (buf)->data += size; \ + } while (0) + +#define WSM_GET(buf, ptr, size) \ + do { \ + if ((buf)->data + size > (buf)->end) \ + goto underflow; \ + memcpy(ptr, (buf)->data, size); \ + (buf)->data += size; \ + } while (0) + +#define __WSM_GET(buf, type, cvt) \ + ({ \ + type val; \ + if ((buf)->data + sizeof(type) > (buf)->end) \ + goto underflow; \ + val = cvt(*(type *)(buf)->data); \ + (buf)->data += sizeof(type); \ + val; \ + }) + +#define WSM_GET8(buf) __WSM_GET(buf, u8, (u8)) +#define WSM_GET16(buf) __WSM_GET(buf, u16, __le16_to_cpu) +#define WSM_GET32(buf) __WSM_GET(buf, u32, __le32_to_cpu) + +#define WSM_PUT(buf, ptr, size) \ + do { \ + if ((buf)->data + size > (buf)->end) \ + if (wsm_buf_reserve((buf), size)) \ + goto nomem; \ + memcpy((buf)->data, ptr, size); \ + (buf)->data += size; \ + } while (0) + +#define __WSM_PUT(buf, val, type, cvt) \ + do { \ + if ((buf)->data + sizeof(type) > (buf)->end) \ + if (wsm_buf_reserve((buf), sizeof(type))) \ + goto nomem; \ + *(type *)(buf)->data = cvt(val); \ + (buf)->data += sizeof(type); \ + } while (0) + +#define WSM_PUT8(buf, val) __WSM_PUT(buf, val, u8, (u8)) +#define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __cpu_to_le16) +#define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __cpu_to_le32) + +static void wsm_buf_reset(struct wsm_buf *buf); +static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size); + +static int wsm_cmd_send(struct cw1200_common *priv, + struct wsm_buf *buf, + void *arg, u16 cmd, long tmo); + +#define wsm_cmd_lock(__priv) mutex_lock(&((__priv)->wsm_cmd_mux)) +#define wsm_cmd_unlock(__priv) mutex_unlock(&((__priv)->wsm_cmd_mux)) + +/* ******************************************************************** */ +/* WSM API implementation */ + +static int wsm_generic_confirm(struct cw1200_common *priv, + void *arg, + struct wsm_buf *buf) +{ + u32 status = WSM_GET32(buf); + if (status != WSM_STATUS_SUCCESS) + return -EINVAL; + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +int wsm_configuration(struct cw1200_common *priv, struct wsm_configuration *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT32(buf, arg->dot11MaxTransmitMsduLifeTime); + WSM_PUT32(buf, arg->dot11MaxReceiveLifeTime); + WSM_PUT32(buf, arg->dot11RtsThreshold); + + /* DPD block. 
*/ + WSM_PUT16(buf, arg->dpdData_size + 12); + WSM_PUT16(buf, 1); /* DPD version */ + WSM_PUT(buf, arg->dot11StationId, ETH_ALEN); + WSM_PUT16(buf, 5); /* DPD flags */ + WSM_PUT(buf, arg->dpdData, arg->dpdData_size); + + ret = wsm_cmd_send(priv, buf, arg, + WSM_CONFIGURATION_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +static int wsm_configuration_confirm(struct cw1200_common *priv, + struct wsm_configuration *arg, + struct wsm_buf *buf) +{ + int i; + int status; + + status = WSM_GET32(buf); + if (WARN_ON(status != WSM_STATUS_SUCCESS)) + return -EINVAL; + + WSM_GET(buf, arg->dot11StationId, ETH_ALEN); + arg->dot11FrequencyBandsSupported = WSM_GET8(buf); + WSM_SKIP(buf, 1); + arg->supportedRateMask = WSM_GET32(buf); + for (i = 0; i < 2; ++i) { + arg->txPowerRange[i].min_power_level = WSM_GET32(buf); + arg->txPowerRange[i].max_power_level = WSM_GET32(buf); + arg->txPowerRange[i].stepping = WSM_GET32(buf); + } + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +/* ******************************************************************** */ + +int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + u16 cmd = WSM_RESET_REQ_ID | WSM_TX_LINK_ID(arg->link_id); + + wsm_cmd_lock(priv); + + WSM_PUT32(buf, arg->reset_statistics ? 0 : 1); + ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_RESET_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +struct wsm_mib { + u16 mib_id; + void *buf; + size_t buf_size; +}; + +int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *_buf, + size_t buf_size) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + struct wsm_mib mib_buf = { + .mib_id = mib_id, + .buf = _buf, + .buf_size = buf_size, + }; + wsm_cmd_lock(priv); + + WSM_PUT16(buf, mib_id); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, &mib_buf, + WSM_READ_MIB_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +static int wsm_read_mib_confirm(struct cw1200_common *priv, + struct wsm_mib *arg, + struct wsm_buf *buf) +{ + u16 size; + if (WARN_ON(WSM_GET32(buf) != WSM_STATUS_SUCCESS)) + return -EINVAL; + + if (WARN_ON(WSM_GET16(buf) != arg->mib_id)) + return -EINVAL; + + size = WSM_GET16(buf); + if (size > arg->buf_size) + size = arg->buf_size; + + WSM_GET(buf, arg->buf, size); + arg->buf_size = size; + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +/* ******************************************************************** */ + +int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *_buf, + size_t buf_size) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + struct wsm_mib mib_buf = { + .mib_id = mib_id, + .buf = _buf, + .buf_size = buf_size, + }; + + wsm_cmd_lock(priv); + + WSM_PUT16(buf, mib_id); + WSM_PUT16(buf, buf_size); + WSM_PUT(buf, _buf, buf_size); + + ret = wsm_cmd_send(priv, buf, &mib_buf, + WSM_WRITE_MIB_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +static int wsm_write_mib_confirm(struct cw1200_common *priv, + struct wsm_mib *arg, + struct wsm_buf *buf) +{ + int ret; + + ret = wsm_generic_confirm(priv, arg, buf); + if (ret) + return ret; + + if (arg->mib_id == WSM_MIB_ID_OPERATIONAL_POWER_MODE) { + /* OperationalMode: 
update PM status. */ + const char *p = arg->buf; + cw1200_enable_powersave(priv, (p[0] & 0x0F) ? true : false); + } + return 0; +} + +/* ******************************************************************** */ + +int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg) +{ + int i; + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + if (arg->num_channels > 48) + return -EINVAL; + + if (arg->num_ssids > 2) + return -EINVAL; + + if (arg->band > 1) + return -EINVAL; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->band); + WSM_PUT8(buf, arg->type); + WSM_PUT8(buf, arg->flags); + WSM_PUT8(buf, arg->max_tx_rate); + WSM_PUT32(buf, arg->auto_scan_interval); + WSM_PUT8(buf, arg->num_probes); + WSM_PUT8(buf, arg->num_channels); + WSM_PUT8(buf, arg->num_ssids); + WSM_PUT8(buf, arg->probe_delay); + + for (i = 0; i < arg->num_channels; ++i) { + WSM_PUT16(buf, arg->ch[i].number); + WSM_PUT16(buf, 0); + WSM_PUT32(buf, arg->ch[i].min_chan_time); + WSM_PUT32(buf, arg->ch[i].max_chan_time); + WSM_PUT32(buf, 0); + } + + for (i = 0; i < arg->num_ssids; ++i) { + WSM_PUT32(buf, arg->ssids[i].length); + WSM_PUT(buf, &arg->ssids[i].ssid[0], + sizeof(arg->ssids[i].ssid)); + } + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_START_SCAN_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_stop_scan(struct cw1200_common *priv) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + wsm_cmd_lock(priv); + ret = wsm_cmd_send(priv, buf, NULL, + WSM_STOP_SCAN_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; +} + + +static int wsm_tx_confirm(struct cw1200_common *priv, + struct wsm_buf *buf, + int link_id) +{ + struct wsm_tx_confirm tx_confirm; + + tx_confirm.packet_id = WSM_GET32(buf); + tx_confirm.status = WSM_GET32(buf); + tx_confirm.tx_rate = WSM_GET8(buf); + tx_confirm.ack_failures = WSM_GET8(buf); + tx_confirm.flags = WSM_GET16(buf); + tx_confirm.media_delay = WSM_GET32(buf); + tx_confirm.tx_queue_delay = WSM_GET32(buf); + + cw1200_tx_confirm_cb(priv, link_id, &tx_confirm); + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +static int wsm_multi_tx_confirm(struct cw1200_common *priv, + struct wsm_buf *buf, int link_id) +{ + int ret; + int count; + int i; + + count = WSM_GET32(buf); + if (WARN_ON(count <= 0)) + return -EINVAL; + + if (count > 1) { + /* We already released one buffer, now for the rest */ + ret = wsm_release_tx_buffer(priv, count - 1); + if (ret < 0) + return ret; + else if (ret > 0) + cw1200_bh_wakeup(priv); + } + + cw1200_debug_txed_multi(priv, count); + for (i = 0; i < count; ++i) { + ret = wsm_tx_confirm(priv, buf, link_id); + if (ret) + return ret; + } + return ret; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +/* ******************************************************************** */ + +static int wsm_join_confirm(struct cw1200_common *priv, + struct wsm_join_cnf *arg, + struct wsm_buf *buf) +{ + arg->status = WSM_GET32(buf); + if (WARN_ON(arg->status) != WSM_STATUS_SUCCESS) + return -EINVAL; + + arg->min_power_level = WSM_GET32(buf); + arg->max_power_level = WSM_GET32(buf); + + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +int wsm_join(struct cw1200_common *priv, struct wsm_join *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + struct wsm_join_cnf resp; + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->band); + WSM_PUT16(buf, 
arg->channel_number); + WSM_PUT(buf, &arg->bssid[0], sizeof(arg->bssid)); + WSM_PUT16(buf, arg->atim_window); + WSM_PUT8(buf, arg->preamble_type); + WSM_PUT8(buf, arg->probe_for_join); + WSM_PUT8(buf, arg->dtim_period); + WSM_PUT8(buf, arg->flags); + WSM_PUT32(buf, arg->ssid_len); + WSM_PUT(buf, &arg->ssid[0], sizeof(arg->ssid)); + WSM_PUT32(buf, arg->beacon_interval); + WSM_PUT32(buf, arg->basic_rate_set); + + priv->tx_burst_idx = -1; + ret = wsm_cmd_send(priv, buf, &resp, + WSM_JOIN_REQ_ID, WSM_CMD_TIMEOUT); + /* TODO: Update state based on resp.min|max_power_level */ + + priv->join_complete_status = resp.status; + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_bss_params(struct cw1200_common *priv, + const struct wsm_set_bss_params *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, (arg->reset_beacon_loss ? 0x1 : 0)); + WSM_PUT8(buf, arg->beacon_lost_count); + WSM_PUT16(buf, arg->aid); + WSM_PUT32(buf, arg->operational_rate_set); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_SET_BSS_PARAMS_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT(buf, arg, sizeof(*arg)); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_ADD_KEY_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_remove_key(struct cw1200_common *priv, const struct wsm_remove_key *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->index); + WSM_PUT8(buf, 0); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_REMOVE_KEY_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_tx_queue_params(struct cw1200_common *priv, + const struct wsm_set_tx_queue_params *arg, u8 id) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1}; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, queue_id_to_wmm_aci[id]); + WSM_PUT8(buf, 0); + WSM_PUT8(buf, arg->ackPolicy); + WSM_PUT8(buf, 0); + WSM_PUT32(buf, arg->maxTransmitLifetime); + WSM_PUT16(buf, arg->allowedMediumTime); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, NULL, 0x0012, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_edca_params(struct cw1200_common *priv, + const struct wsm_edca_params *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + /* Implemented according to specification. 
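For orientation, the queue_id_to_wmm_aci[] table used in wsm_set_tx_queue_params() above maps mac80211's priority-ordered queues onto the WMM access-category numbering the firmware expects. A small standalone illustration follows; the names and the loop are the editor's, the table values are the driver's.

#include <stdio.h>

// mac80211 orders queues by priority (0 = VO ... 3 = BK), while the WMM ACI
// numbering is BE = 0, BK = 1, VI = 2, VO = 3.
static const char *queue_name[] = { "VO", "VI", "BE", "BK" };
static const int queue_to_aci[] = { 3, 2, 0, 1 };

int main(void)
{
    for (int q = 0; q < 4; q++)
        printf("mac80211 queue %d (%s) -> WMM ACI %d\n",
               q, queue_name[q], queue_to_aci[q]);
    return 0;
}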
*/ + + WSM_PUT16(buf, arg->params[3].cwmin); + WSM_PUT16(buf, arg->params[2].cwmin); + WSM_PUT16(buf, arg->params[1].cwmin); + WSM_PUT16(buf, arg->params[0].cwmin); + + WSM_PUT16(buf, arg->params[3].cwmax); + WSM_PUT16(buf, arg->params[2].cwmax); + WSM_PUT16(buf, arg->params[1].cwmax); + WSM_PUT16(buf, arg->params[0].cwmax); + + WSM_PUT8(buf, arg->params[3].aifns); + WSM_PUT8(buf, arg->params[2].aifns); + WSM_PUT8(buf, arg->params[1].aifns); + WSM_PUT8(buf, arg->params[0].aifns); + + WSM_PUT16(buf, arg->params[3].txop_limit); + WSM_PUT16(buf, arg->params[2].txop_limit); + WSM_PUT16(buf, arg->params[1].txop_limit); + WSM_PUT16(buf, arg->params[0].txop_limit); + + WSM_PUT32(buf, arg->params[3].max_rx_lifetime); + WSM_PUT32(buf, arg->params[2].max_rx_lifetime); + WSM_PUT32(buf, arg->params[1].max_rx_lifetime); + WSM_PUT32(buf, arg->params[0].max_rx_lifetime); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_EDCA_PARAMS_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_switch_channel(struct cw1200_common *priv, + const struct wsm_switch_channel *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->switch_count); + WSM_PUT16(buf, arg->channel_number); + + priv->channel_switch_in_progress = 1; + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_SWITCH_CHANNEL_REQ_ID, WSM_CMD_TIMEOUT); + if (ret) + priv->channel_switch_in_progress = 0; + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + priv->ps_mode_switch_in_progress = 1; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->fast_psm_idle_period); + WSM_PUT8(buf, arg->ap_psm_change_period); + WSM_PUT8(buf, arg->min_auto_pspoll_period); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_SET_PM_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->band); + WSM_PUT16(buf, arg->channel_number); + WSM_PUT32(buf, arg->ct_window); + WSM_PUT32(buf, arg->beacon_interval); + WSM_PUT8(buf, arg->dtim_period); + WSM_PUT8(buf, arg->preamble); + WSM_PUT8(buf, arg->probe_delay); + WSM_PUT8(buf, arg->ssid_len); + WSM_PUT(buf, arg->ssid, sizeof(arg->ssid)); + WSM_PUT32(buf, arg->basic_rate_set); + + priv->tx_burst_idx = -1; + ret = wsm_cmd_send(priv, buf, NULL, + WSM_START_REQ_ID, WSM_CMD_START_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_beacon_transmit(struct cw1200_common *priv, + const struct wsm_beacon_transmit *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT32(buf, arg->enable_beaconing ? 
1 : 0); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_BEACON_TRANSMIT_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_start_find(struct cw1200_common *priv) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + ret = wsm_cmd_send(priv, buf, NULL, 0x0019, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; +} + +/* ******************************************************************** */ + +int wsm_stop_find(struct cw1200_common *priv) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + ret = wsm_cmd_send(priv, buf, NULL, 0x001A, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; +} + +/* ******************************************************************** */ + +int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + u16 cmd = 0x001C | WSM_TX_LINK_ID(arg->link_id); + + wsm_cmd_lock(priv); + + WSM_PUT(buf, &arg->mac_addr[0], sizeof(arg->mac_addr)); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_update_ie(struct cw1200_common *priv, + const struct wsm_update_ie *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT16(buf, arg->what); + WSM_PUT16(buf, arg->count); + WSM_PUT(buf, arg->ies, arg->length); + + ret = wsm_cmd_send(priv, buf, NULL, 0x001B, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ +int wsm_set_probe_responder(struct cw1200_common *priv, bool enable) +{ + priv->rx_filter.probeResponder = enable; + return wsm_set_rx_filter(priv, &priv->rx_filter); +} + +/* ******************************************************************** */ +/* WSM indication events implementation */ +const char * const cw1200_fw_types[] = { + "ETF", + "WFM", + "WSM", + "HI test", + "Platform test" +}; + +static int wsm_startup_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + priv->wsm_caps.input_buffers = WSM_GET16(buf); + priv->wsm_caps.input_buffer_size = WSM_GET16(buf); + priv->wsm_caps.hw_id = WSM_GET16(buf); + priv->wsm_caps.hw_subid = WSM_GET16(buf); + priv->wsm_caps.status = WSM_GET16(buf); + priv->wsm_caps.fw_cap = WSM_GET16(buf); + priv->wsm_caps.fw_type = WSM_GET16(buf); + priv->wsm_caps.fw_api = WSM_GET16(buf); + priv->wsm_caps.fw_build = WSM_GET16(buf); + priv->wsm_caps.fw_ver = WSM_GET16(buf); + WSM_GET(buf, priv->wsm_caps.fw_label, sizeof(priv->wsm_caps.fw_label)); + priv->wsm_caps.fw_label[sizeof(priv->wsm_caps.fw_label) - 1] = 0; /* Do not trust FW too much... 
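The capability checks that follow use two bits of fw_cap to decide which bands survive. A standalone illustration (the FW_CAP_* names are invented here for clarity; only the bit values come from the code below):

#include <stdint.h>
#include <stdio.h>

#define FW_CAP_2GHZ 0x1u
#define FW_CAP_5GHZ 0x2u

// A band whose capability bit is clear is removed from the wiphy by
// wsm_startup_indication(); this helper just reports the same decision.
static void report_bands(uint16_t fw_cap)
{
    printf("fw_cap=0x%04x: 2.4 GHz %s, 5 GHz %s\n", (unsigned)fw_cap,
           (fw_cap & FW_CAP_2GHZ) ? "enabled" : "disabled",
           (fw_cap & FW_CAP_5GHZ) ? "enabled" : "disabled");
}

int main(void)
{
    report_bands(0x0001);  // 2.4 GHz-only firmware
    report_bands(0x0003);  // dual-band firmware
    return 0;
}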
*/ + + if (WARN_ON(priv->wsm_caps.status)) + return -EINVAL; + + if (WARN_ON(priv->wsm_caps.fw_type > 4)) + return -EINVAL; + + pr_info("CW1200 WSM init done.\n" + " Input buffers: %d x %d bytes\n" + " Hardware: %d.%d\n" + " %s firmware [%s], ver: %d, build: %d," + " api: %d, cap: 0x%.4X\n", + priv->wsm_caps.input_buffers, + priv->wsm_caps.input_buffer_size, + priv->wsm_caps.hw_id, priv->wsm_caps.hw_subid, + cw1200_fw_types[priv->wsm_caps.fw_type], + priv->wsm_caps.fw_label, priv->wsm_caps.fw_ver, + priv->wsm_caps.fw_build, + priv->wsm_caps.fw_api, priv->wsm_caps.fw_cap); + + /* Disable unsupported frequency bands */ + if (!(priv->wsm_caps.fw_cap & 0x1)) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; + if (!(priv->wsm_caps.fw_cap & 0x2)) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; + + priv->firmware_ready = 1; + wake_up(&priv->wsm_startup_done); + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +static int wsm_receive_indication(struct cw1200_common *priv, + int link_id, + struct wsm_buf *buf, + struct sk_buff **skb_p) +{ + struct wsm_rx rx; + struct ieee80211_hdr *hdr; + size_t hdr_len; + __le16 fctl; + + rx.status = WSM_GET32(buf); + rx.channel_number = WSM_GET16(buf); + rx.rx_rate = WSM_GET8(buf); + rx.rcpi_rssi = WSM_GET8(buf); + rx.flags = WSM_GET32(buf); + + /* FW Workaround: Drop probe resp or + beacon when RSSI is 0 + */ + hdr = (struct ieee80211_hdr *)(*skb_p)->data; + + if (!rx.rcpi_rssi && + (ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control))) + return 0; + + /* If no RSSI subscription has been made, + * convert RCPI to RSSI here + */ + if (!priv->cqm_use_rssi) + rx.rcpi_rssi = rx.rcpi_rssi / 2 - 110; + + fctl = *(__le16 *)buf->data; + hdr_len = buf->data - buf->begin; + skb_pull(*skb_p, hdr_len); + if (!rx.status && ieee80211_is_deauth(fctl)) { + if (priv->join_status == CW1200_JOIN_STATUS_STA) { + /* Shedule unjoin work */ + pr_debug("[WSM] Issue unjoin command (RX).\n"); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, + &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } + } + cw1200_rx_cb(priv, &rx, link_id, skb_p); + if (*skb_p) + skb_push(*skb_p, hdr_len); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_event_indication(struct cw1200_common *priv, struct wsm_buf *buf) +{ + int first; + struct cw1200_wsm_event *event; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* STA is stopped. 
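The RCPI-to-dBm conversion in wsm_receive_indication() above matches the 802.11 definition of RCPI as 2 * (dBm + 110). A few sample points as a standalone program:

#include <stdio.h>

// Inverse of the RCPI encoding: rcpi / 2 - 110, as done when no RSSI
// subscription is active.
static int rcpi_to_dbm(unsigned rcpi)
{
    return (int)rcpi / 2 - 110;
}

int main(void)
{
    unsigned samples[] = { 40, 90, 140, 220 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("RCPI %3u -> %d dBm\n", samples[i], rcpi_to_dbm(samples[i]));
    return 0;
}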
*/ + return 0; + } + + event = kzalloc(sizeof(struct cw1200_wsm_event), GFP_KERNEL); + + event->evt.id = __le32_to_cpu(WSM_GET32(buf)); + event->evt.data = __le32_to_cpu(WSM_GET32(buf)); + + pr_debug("[WSM] Event: %d(%d)\n", + event->evt.id, event->evt.data); + + spin_lock(&priv->event_queue_lock); + first = list_empty(&priv->event_queue); + list_add_tail(&event->link, &priv->event_queue); + spin_unlock(&priv->event_queue_lock); + + if (first) + queue_work(priv->workqueue, &priv->event_handler); + + return 0; + +underflow: + kfree(event); + return -EINVAL; +} + +static int wsm_channel_switch_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + WARN_ON(WSM_GET32(buf)); + + priv->channel_switch_in_progress = 0; + wake_up(&priv->channel_switch_done); + + wsm_unlock_tx(priv); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_set_pm_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + /* TODO: Check buf (struct wsm_set_pm_complete) for validity */ + if (priv->ps_mode_switch_in_progress) { + priv->ps_mode_switch_in_progress = 0; + wake_up(&priv->ps_mode_switch_done); + } + return 0; +} + +static int wsm_scan_started(struct cw1200_common *priv, void *arg, + struct wsm_buf *buf) +{ + u32 status = WSM_GET32(buf); + if (status != WSM_STATUS_SUCCESS) { + cw1200_scan_failed_cb(priv); + return -EINVAL; + } + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +static int wsm_scan_complete_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + struct wsm_scan_complete arg; + arg.status = WSM_GET32(buf); + arg.psm = WSM_GET8(buf); + arg.num_channels = WSM_GET8(buf); + cw1200_scan_complete_cb(priv, &arg); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_join_complete_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + struct wsm_join_complete arg; + arg.status = WSM_GET32(buf); + pr_debug("[WSM] Join complete indication, status: %d\n", arg.status); + cw1200_join_complete_cb(priv, &arg); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_find_complete_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + pr_warn("Implement find_complete_indication\n"); + return 0; +} + +static int wsm_ba_timeout_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + u32 dummy; + u8 tid; + u8 dummy2; + u8 addr[ETH_ALEN]; + + dummy = WSM_GET32(buf); + tid = WSM_GET8(buf); + dummy2 = WSM_GET8(buf); + WSM_GET(buf, addr, ETH_ALEN); + + pr_info("BlockACK timeout, tid %d, addr %pM\n", + tid, addr); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_suspend_resume_indication(struct cw1200_common *priv, + int link_id, struct wsm_buf *buf) +{ + u32 flags; + struct wsm_suspend_resume arg; + + flags = WSM_GET32(buf); + arg.link_id = link_id; + arg.stop = !(flags & 1); + arg.multicast = !!(flags & 8); + arg.queue = (flags >> 1) & 3; + + cw1200_suspend_resume(priv, &arg); + + return 0; + +underflow: + return -EINVAL; +} + + +/* ******************************************************************** */ +/* WSM TX */ + +static int wsm_cmd_send(struct cw1200_common *priv, + struct wsm_buf *buf, + void *arg, u16 cmd, long tmo) +{ + size_t buf_len = buf->data - buf->begin; + int ret; + + /* Don't bother if we're dead. */ + if (priv->bh_error) { + ret = 0; + goto done; + } + + /* Block until the cmd buffer is completed. Tortuous. 
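Before the request is handed to the bottom-half, wsm_cmd_send() below writes a small host-interface header in front of the payload. A standalone sketch of that framing (the command id used here is arbitrary; the 4-byte pad mirrors the SPI workaround in the code, and the BH layer later folds the sequence number into the id word):

#include <stdint.h>
#include <stdio.h>

// First two little-endian 16-bit words of every WSM frame: total length,
// then command id.
static void fill_hi_header(uint8_t *frame, uint16_t len, uint16_t cmd)
{
    frame[0] = len & 0xff;
    frame[1] = len >> 8;
    frame[2] = cmd & 0xff;
    frame[3] = cmd >> 8;
}

int main(void)
{
    uint8_t frame[64] = { 0 };
    uint16_t body_len = 12;

    // The driver pads the declared length by 4 bytes before writing the
    // header, to work around short SPI receptions.
    fill_hi_header(frame, body_len + 4, 0x0123 /* arbitrary command id */);
    printf("header: %02x %02x %02x %02x\n",
           frame[0], frame[1], frame[2], frame[3]);
    return 0;
}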
*/ + spin_lock(&priv->wsm_cmd.lock); + while (!priv->wsm_cmd.done) { + spin_unlock(&priv->wsm_cmd.lock); + spin_lock(&priv->wsm_cmd.lock); + } + priv->wsm_cmd.done = 0; + spin_unlock(&priv->wsm_cmd.lock); + + if (cmd == WSM_WRITE_MIB_REQ_ID || + cmd == WSM_READ_MIB_REQ_ID) + pr_debug("[WSM] >>> 0x%.4X [MIB: 0x%.4X] (%zu)\n", + cmd, __le16_to_cpu(((__le16 *)buf->begin)[2]), + buf_len); + else + pr_debug("[WSM] >>> 0x%.4X (%zu)\n", cmd, buf_len); + + /* + * Due to buggy SPI on CW1200, we need to + * pad the message by a few bytes to ensure + * that it's completely received. + */ +#ifdef CONFIG_CW1200_ETF + if (!etf_mode) +#endif + buf_len += 4; + + /* Fill HI message header */ + /* BH will add sequence number */ + ((__le16 *)buf->begin)[0] = __cpu_to_le16(buf_len); + ((__le16 *)buf->begin)[1] = __cpu_to_le16(cmd); + + spin_lock(&priv->wsm_cmd.lock); + BUG_ON(priv->wsm_cmd.ptr); + priv->wsm_cmd.ptr = buf->begin; + priv->wsm_cmd.len = buf_len; + priv->wsm_cmd.arg = arg; + priv->wsm_cmd.cmd = cmd; + spin_unlock(&priv->wsm_cmd.lock); + + cw1200_bh_wakeup(priv); + + /* Wait for command completion */ + ret = wait_event_timeout(priv->wsm_cmd_wq, + priv->wsm_cmd.done, tmo); + + if (!ret && !priv->wsm_cmd.done) { + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.done = 1; + priv->wsm_cmd.ptr = NULL; + spin_unlock(&priv->wsm_cmd.lock); + if (priv->bh_error) { + /* Return ok to help system cleanup */ + ret = 0; + } else { + pr_err("CMD req (0x%04x) stuck in firmware, killing BH\n", priv->wsm_cmd.cmd); + print_hex_dump_bytes("REQDUMP: ", DUMP_PREFIX_NONE, + buf->begin, buf_len); + pr_err("Outstanding outgoing frames: %d\n", priv->hw_bufs_used); + + /* Kill BH thread to report the error to the top layer. */ + atomic_add(1, &priv->bh_term); + wake_up(&priv->bh_wq); + ret = -ETIMEDOUT; + } + } else { + spin_lock(&priv->wsm_cmd.lock); + BUG_ON(!priv->wsm_cmd.done); + ret = priv->wsm_cmd.ret; + spin_unlock(&priv->wsm_cmd.lock); + } +done: + wsm_buf_reset(buf); + return ret; +} + +#ifdef CONFIG_CW1200_ETF +int wsm_raw_cmd(struct cw1200_common *priv, u8 *data, size_t len) +{ + struct wsm_buf *buf = &priv->wsm_cmd_buf; + int ret; + + u16 *cmd = (u16 *)(data + 2); + + wsm_cmd_lock(priv); + + WSM_PUT(buf, data + 4, len - 4); /* Skip over header (u16+u16) */ + + ret = wsm_cmd_send(priv, buf, NULL, __le16_to_cpu(*cmd), WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} +#endif /* CONFIG_CW1200_ETF */ + +/* ******************************************************************** */ +/* WSM TX port control */ + +void wsm_lock_tx(struct cw1200_common *priv) +{ + wsm_cmd_lock(priv); + if (atomic_add_return(1, &priv->tx_lock) == 1) { + if (wsm_flush_tx(priv)) + pr_debug("[WSM] TX is locked.\n"); + } + wsm_cmd_unlock(priv); +} + +void wsm_lock_tx_async(struct cw1200_common *priv) +{ + if (atomic_add_return(1, &priv->tx_lock) == 1) + pr_debug("[WSM] TX is locked (async).\n"); +} + +bool wsm_flush_tx(struct cw1200_common *priv) +{ + unsigned long timestamp = jiffies; + bool pending = false; + long timeout; + int i; + + /* Flush must be called with TX lock held. */ + BUG_ON(!atomic_read(&priv->tx_lock)); + + /* First check if we really need to do something. + * It is safe to use unprotected access, as hw_bufs_used + * can only decrements. + */ + if (!priv->hw_bufs_used) + return true; + + if (priv->bh_error) { + /* In case of failure do not wait for magic. 
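wsm_lock_tx() and wsm_lock_tx_async() above, together with wsm_unlock_tx() further down, form a refcounted gate around the TX path. A toy model of that counting (a plain int in place of the driver's atomic tx_lock):

#include <stdio.h>

struct tx_gate { int lock_count; };

// Only the 0 -> 1 transition quiesces the data path, and only the 1 -> 0
// transition kicks it again, so nested users stack cleanly.
static void gate_lock(struct tx_gate *g)
{
    if (++g->lock_count == 1)
        printf("first locker: TX path quiesced\n");
}

static void gate_unlock(struct tx_gate *g)
{
    if (--g->lock_count == 0)
        printf("last unlocker: TX path restarted\n");
}

int main(void)
{
    struct tx_gate gate = { 0 };

    gate_lock(&gate);    // e.g. scan start
    gate_lock(&gate);    // e.g. pending unjoin work
    gate_unlock(&gate);  // inner user done, TX stays locked
    gate_unlock(&gate);  // outer user done, TX resumes
    return 0;
}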
*/ + pr_err("[WSM] Fatal error occured, will not flush TX.\n"); + return false; + } else { + /* Get a timestamp of "oldest" frame */ + for (i = 0; i < 4; ++i) + pending |= cw1200_queue_get_xmit_timestamp( + &priv->tx_queue[i], + ×tamp, 0xffffffff); + /* If there's nothing pending, we're good */ + if (!pending) + return true; + + timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT - jiffies; + if (timeout < 0 || wait_event_timeout(priv->bh_evt_wq, + !priv->hw_bufs_used, + timeout) <= 0) { + /* Hmmm... Not good. Frame had stuck in firmware. */ + priv->bh_error = 1; + wiphy_err(priv->hw->wiphy, "[WSM] TX Frames (%d) stuck in firmware, killing BH\n", priv->hw_bufs_used); + wake_up(&priv->bh_wq); + return false; + } + + /* Ok, everything is flushed. */ + return true; + } +} + +void wsm_unlock_tx(struct cw1200_common *priv) +{ + int tx_lock; + tx_lock = atomic_sub_return(1, &priv->tx_lock); + BUG_ON(tx_lock < 0); + + if (tx_lock == 0) { + if (!priv->bh_error) + cw1200_bh_wakeup(priv); + pr_debug("[WSM] TX is unlocked.\n"); + } +} + +/* ******************************************************************** */ +/* WSM RX */ + +int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len) +{ + struct wsm_buf buf; + u32 reason; + u32 reg[18]; + char fname[48]; + unsigned int i; + + static const char * const reason_str[] = { + "undefined instruction", + "prefetch abort", + "data abort", + "unknown error", + }; + + buf.begin = buf.data = data; + buf.end = &buf.begin[len]; + + reason = WSM_GET32(&buf); + for (i = 0; i < ARRAY_SIZE(reg); ++i) + reg[i] = WSM_GET32(&buf); + WSM_GET(&buf, fname, sizeof(fname)); + + if (reason < 4) + wiphy_err(priv->hw->wiphy, + "Firmware exception: %s.\n", + reason_str[reason]); + else + wiphy_err(priv->hw->wiphy, + "Firmware assert at %.*s, line %d\n", + (int) sizeof(fname), fname, reg[1]); + + for (i = 0; i < 12; i += 4) + wiphy_err(priv->hw->wiphy, + "R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X,\n", + i + 0, reg[i + 0], i + 1, reg[i + 1], + i + 2, reg[i + 2], i + 3, reg[i + 3]); + wiphy_err(priv->hw->wiphy, + "R12: 0x%.8X, SP: 0x%.8X, LR: 0x%.8X, PC: 0x%.8X,\n", + reg[i + 0], reg[i + 1], reg[i + 2], reg[i + 3]); + i += 4; + wiphy_err(priv->hw->wiphy, + "CPSR: 0x%.8X, SPSR: 0x%.8X\n", + reg[i + 0], reg[i + 1]); + + print_hex_dump_bytes("R1: ", DUMP_PREFIX_NONE, + fname, sizeof(fname)); + return 0; + +underflow: + wiphy_err(priv->hw->wiphy, "Firmware exception.\n"); + print_hex_dump_bytes("Exception: ", DUMP_PREFIX_NONE, + data, len); + return -EINVAL; +} + +int wsm_handle_rx(struct cw1200_common *priv, u16 id, + struct wsm_hdr *wsm, struct sk_buff **skb_p) +{ + int ret = 0; + struct wsm_buf wsm_buf; + int link_id = (id >> 6) & 0x0F; + + /* Strip link id. 
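The link-id and message-class bits of a WSM id, as used by wsm_handle_rx() around this point, can be illustrated with a standalone decoder. Field positions are taken from the expressions in the surrounding code; the sample ids are only illustrative.

#include <stdint.h>
#include <stdio.h>

// Low bits: command/indication number; bits 6-9: link id (stripped in the
// handler); 0x0400 marks a command confirmation, 0x0800 an unsolicited
// indication.
static void describe(uint16_t id)
{
    unsigned link_id = (id >> 6) & 0x0F;
    unsigned base    = id & ~(unsigned)(0x0F << 6);

    printf("id 0x%04x: base 0x%04x, link_id %u, %s\n", (unsigned)id, base, link_id,
           (id & 0x0800) ? "indication" :
           (id & 0x0400) ? "confirmation" : "request");
}

int main(void)
{
    describe(0x0404);            // confirmation of request 0x0004, link id 0
    describe(0x0404 | (2 << 6)); // same confirmation tagged with link id 2
    describe(0x0804);            // an indication
    return 0;
}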
*/ + id &= ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX); + + wsm_buf.begin = (u8 *)&wsm[0]; + wsm_buf.data = (u8 *)&wsm[1]; + wsm_buf.end = &wsm_buf.begin[__le32_to_cpu(wsm->len)]; + + pr_debug("[WSM] <<< 0x%.4X (%td)\n", id, + wsm_buf.end - wsm_buf.begin); + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) { + struct sk_buff *skb = alloc_skb(wsm_buf.end - wsm_buf.begin, GFP_KERNEL); + + /* Strip out Sequence num before passing up */ + wsm->id = __le16_to_cpu(wsm->id); + wsm->id &= 0x0FFF; + wsm->id = __cpu_to_le16(wsm->id); + + memcpy(skb_put(skb, wsm_buf.end - wsm_buf.begin), + wsm_buf.begin, + wsm_buf.end - wsm_buf.begin); + skb_queue_tail(&priv->etf_q, skb); + + /* Special case for startup */ + if (id == WSM_STARTUP_IND_ID) { + wsm_startup_indication(priv, &wsm_buf); + } else if (id & 0x0400) { + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.done = 1; + spin_unlock(&priv->wsm_cmd.lock); + wake_up(&priv->wsm_cmd_wq); + } + + goto out; + } +#endif + + if (id == WSM_TX_CONFIRM_IND_ID) { + ret = wsm_tx_confirm(priv, &wsm_buf, link_id); + } else if (id == WSM_MULTI_TX_CONFIRM_ID) { + ret = wsm_multi_tx_confirm(priv, &wsm_buf, link_id); + } else if (id & 0x0400) { + void *wsm_arg; + u16 wsm_cmd; + + /* Do not trust FW too much. Protection against repeated + * response and race condition removal (see above). + */ + spin_lock(&priv->wsm_cmd.lock); + wsm_arg = priv->wsm_cmd.arg; + wsm_cmd = priv->wsm_cmd.cmd & + ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX); + priv->wsm_cmd.cmd = 0xFFFF; + spin_unlock(&priv->wsm_cmd.lock); + + if (WARN_ON((id & ~0x0400) != wsm_cmd)) { + /* Note that any non-zero is a fatal retcode. */ + ret = -EINVAL; + goto out; + } + + /* Note that wsm_arg can be NULL in case of timeout in + * wsm_cmd_send(). + */ + + switch (id) { + case WSM_READ_MIB_RESP_ID: + if (wsm_arg) + ret = wsm_read_mib_confirm(priv, wsm_arg, + &wsm_buf); + break; + case WSM_WRITE_MIB_RESP_ID: + if (wsm_arg) + ret = wsm_write_mib_confirm(priv, wsm_arg, + &wsm_buf); + break; + case WSM_START_SCAN_RESP_ID: + if (wsm_arg) + ret = wsm_scan_started(priv, wsm_arg, &wsm_buf); + break; + case WSM_CONFIGURATION_RESP_ID: + if (wsm_arg) + ret = wsm_configuration_confirm(priv, wsm_arg, + &wsm_buf); + break; + case WSM_JOIN_RESP_ID: + if (wsm_arg) + ret = wsm_join_confirm(priv, wsm_arg, &wsm_buf); + break; + case WSM_STOP_SCAN_RESP_ID: + case WSM_RESET_RESP_ID: + case WSM_ADD_KEY_RESP_ID: + case WSM_REMOVE_KEY_RESP_ID: + case WSM_SET_PM_RESP_ID: + case WSM_SET_BSS_PARAMS_RESP_ID: + case 0x0412: /* set_tx_queue_params */ + case WSM_EDCA_PARAMS_RESP_ID: + case WSM_SWITCH_CHANNEL_RESP_ID: + case WSM_START_RESP_ID: + case WSM_BEACON_TRANSMIT_RESP_ID: + case 0x0419: /* start_find */ + case 0x041A: /* stop_find */ + case 0x041B: /* update_ie */ + case 0x041C: /* map_link */ + WARN_ON(wsm_arg != NULL); + ret = wsm_generic_confirm(priv, wsm_arg, &wsm_buf); + if (ret) { + wiphy_warn(priv->hw->wiphy, + "wsm_generic_confirm failed for request 0x%04x.\n", + id & ~0x0400); + + /* often 0x407 and 0x410 occur, this means we're dead.. */ + if (priv->join_status >= CW1200_JOIN_STATUS_JOINING) { + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } + } + break; + default: + wiphy_warn(priv->hw->wiphy, + "Unrecognized confirmation 0x%04x\n", + id & ~0x0400); + } + + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.ret = ret; + priv->wsm_cmd.done = 1; + spin_unlock(&priv->wsm_cmd.lock); + + ret = 0; /* Error response from device should ne stop BH. 
*/ + + wake_up(&priv->wsm_cmd_wq); + } else if (id & 0x0800) { + switch (id) { + case WSM_STARTUP_IND_ID: + ret = wsm_startup_indication(priv, &wsm_buf); + break; + case WSM_RECEIVE_IND_ID: + ret = wsm_receive_indication(priv, link_id, + &wsm_buf, skb_p); + break; + case 0x0805: + ret = wsm_event_indication(priv, &wsm_buf); + break; + case WSM_SCAN_COMPLETE_IND_ID: + ret = wsm_scan_complete_indication(priv, &wsm_buf); + break; + case 0x0808: + ret = wsm_ba_timeout_indication(priv, &wsm_buf); + break; + case 0x0809: + ret = wsm_set_pm_indication(priv, &wsm_buf); + break; + case 0x080A: + ret = wsm_channel_switch_indication(priv, &wsm_buf); + break; + case 0x080B: + ret = wsm_find_complete_indication(priv, &wsm_buf); + break; + case 0x080C: + ret = wsm_suspend_resume_indication(priv, + link_id, &wsm_buf); + break; + case 0x080F: + ret = wsm_join_complete_indication(priv, &wsm_buf); + break; + default: + pr_warn("Unrecognised WSM ID %04x\n", id); + } + } else { + WARN_ON(1); + ret = -EINVAL; + } +out: + return ret; +} + +static bool wsm_handle_tx_data(struct cw1200_common *priv, + struct wsm_tx *wsm, + const struct ieee80211_tx_info *tx_info, + const struct cw1200_txpriv *txpriv, + struct cw1200_queue *queue) +{ + bool handled = false; + const struct ieee80211_hdr *frame = + (struct ieee80211_hdr *)&((u8 *)wsm)[txpriv->offset]; + __le16 fctl = frame->frame_control; + enum { + do_probe, + do_drop, + do_wep, + do_tx, + } action = do_tx; + + switch (priv->mode) { + case NL80211_IFTYPE_STATION: + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) + action = do_tx; + else if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) + action = do_drop; + break; + case NL80211_IFTYPE_AP: + if (!priv->join_status) { + action = do_drop; + } else if (!(BIT(txpriv->raw_link_id) & + (BIT(0) | priv->link_id_map))) { + wiphy_warn(priv->hw->wiphy, + "A frame with expired link id is dropped.\n"); + action = do_drop; + } + if (cw1200_queue_get_generation(wsm->packet_id) > + CW1200_MAX_REQUEUE_ATTEMPTS) { + /* HACK!!! WSM324 firmware has tendency to requeue + * multicast frames in a loop, causing performance + * drop and high power consumption of the driver. + * In this situation it is better just to drop + * the problematic frame. + */ + wiphy_warn(priv->hw->wiphy, + "Too many attempts to requeue a frame; dropped.\n"); + action = do_drop; + } + break; + case NL80211_IFTYPE_ADHOC: + if (priv->join_status != CW1200_JOIN_STATUS_IBSS) + action = do_drop; + break; + case NL80211_IFTYPE_MESH_POINT: + action = do_tx; /* TODO: Test me! 
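The queue selection in cw1200_get_prio_queue() further below weights each non-empty queue by its EDCA parameters plus a random component. A standalone sketch of that scoring (rand() instead of get_random_int(); WMM-like parameter values chosen purely for illustration):

#include <stdio.h>
#include <stdlib.h>

struct edca { unsigned aifs, cwmin, cwmax; };

// The deterministic part (AIFS + CWmin) dominates via the high 16 bits and a
// random draw scaled by the contention-window span breaks ties, loosely
// mimicking EDCA backoff. Lower score wins.
static unsigned long queue_score(const struct edca *e)
{
    unsigned long r = (unsigned long)(rand() & 0xFFFF);

    return ((unsigned long)(e->aifs + e->cwmin) << 16) +
           (unsigned long)(e->cwmax - e->cwmin) * r;
}

int main(void)
{
    struct edca queues[4] = {
        { 2, 3, 7 },     // VO
        { 2, 7, 15 },    // VI
        { 3, 15, 1023 }, // BE
        { 7, 15, 1023 }, // BK
    };
    int best = -1;
    unsigned long best_score = ~0ul;

    srand(1);
    for (int i = 0; i < 4; i++) {
        unsigned long s = queue_score(&queues[i]);
        printf("queue %d: score %lu\n", i, s);
        if (s < best_score) {
            best_score = s;
            best = i;
        }
    }
    printf("winner: queue %d\n", best);
    return 0;
}

With parameters like the ones above, the high word keeps voice traffic ahead of background traffic and the random term only arbitrates between queues of comparable priority.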
*/ + break; + case NL80211_IFTYPE_MONITOR: + default: + action = do_drop; + break; + } + + if (action == do_tx) { + if (ieee80211_is_nullfunc(fctl)) { + spin_lock(&priv->bss_loss_lock); + if (priv->bss_loss_state) { + priv->bss_loss_confirm_id = wsm->packet_id; + wsm->queue_id = WSM_QUEUE_VOICE; + } + spin_unlock(&priv->bss_loss_lock); + } else if (ieee80211_is_probe_req(fctl)) { + action = do_probe; + } else if (ieee80211_is_deauth(fctl) && + priv->mode != NL80211_IFTYPE_AP) { + pr_debug("[WSM] Issue unjoin command due to tx deauth.\n"); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, + &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } else if (ieee80211_has_protected(fctl) && + tx_info->control.hw_key && + tx_info->control.hw_key->keyidx != priv->wep_default_key_id && + (tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 || + tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) { + action = do_wep; + } + } + + switch (action) { + case do_probe: + /* An interesting FW "feature". Device filters probe responses. + * The easiest way to get it back is to convert + * probe request into WSM start_scan command. + */ + pr_debug("[WSM] Convert probe request to scan.\n"); + wsm_lock_tx_async(priv); + priv->pending_frame_id = __le32_to_cpu(wsm->packet_id); + if (queue_delayed_work(priv->workqueue, + &priv->scan.probe_work, 0) <= 0) + wsm_unlock_tx(priv); + handled = true; + break; + case do_drop: + pr_debug("[WSM] Drop frame (0x%.4X).\n", fctl); + BUG_ON(cw1200_queue_remove(queue, + __le32_to_cpu(wsm->packet_id))); + handled = true; + break; + case do_wep: + pr_debug("[WSM] Issue set_default_wep_key.\n"); + wsm_lock_tx_async(priv); + priv->wep_default_key_id = tx_info->control.hw_key->keyidx; + priv->pending_frame_id = __le32_to_cpu(wsm->packet_id); + if (queue_work(priv->workqueue, &priv->wep_key_work) <= 0) + wsm_unlock_tx(priv); + handled = true; + break; + case do_tx: + pr_debug("[WSM] Transmit frame.\n"); + break; + default: + /* Do nothing */ + break; + } + return handled; +} + +static int cw1200_get_prio_queue(struct cw1200_common *priv, + u32 link_id_map, int *total) +{ + static const int urgent = BIT(CW1200_LINK_ID_AFTER_DTIM) | + BIT(CW1200_LINK_ID_UAPSD); + struct wsm_edca_queue_params *edca; + unsigned score, best = -1; + int winner = -1; + int queued; + int i; + + /* search for a winner using edca params */ + for (i = 0; i < 4; ++i) { + queued = cw1200_queue_get_num_queued(&priv->tx_queue[i], + link_id_map); + if (!queued) + continue; + *total += queued; + edca = &priv->edca.params[i]; + score = ((edca->aifns + edca->cwmin) << 16) + + ((edca->cwmax - edca->cwmin) * + (get_random_int() & 0xFFFF)); + if (score < best && (winner < 0 || i != 3)) { + best = score; + winner = i; + } + } + + /* override winner if bursting */ + if (winner >= 0 && priv->tx_burst_idx >= 0 && + winner != priv->tx_burst_idx && + !cw1200_queue_get_num_queued( + &priv->tx_queue[winner], + link_id_map & urgent) && + cw1200_queue_get_num_queued( + &priv->tx_queue[priv->tx_burst_idx], + link_id_map)) + winner = priv->tx_burst_idx; + + return winner; +} + +static int wsm_get_tx_queue_and_mask(struct cw1200_common *priv, + struct cw1200_queue **queue_p, + u32 *tx_allowed_mask_p, + bool *more) +{ + int idx; + u32 tx_allowed_mask; + int total = 0; + + /* Search for a queue with multicast frames buffered */ + if (priv->tx_multicast) { + tx_allowed_mask = BIT(CW1200_LINK_ID_AFTER_DTIM); + idx = cw1200_get_prio_queue(priv, + tx_allowed_mask, &total); + if (idx >= 0) { + *more = total > 1; + 
goto found; + } + } + + /* Search for unicast traffic */ + tx_allowed_mask = ~priv->sta_asleep_mask; + tx_allowed_mask |= BIT(CW1200_LINK_ID_UAPSD); + if (priv->sta_asleep_mask) { + tx_allowed_mask |= priv->pspoll_mask; + tx_allowed_mask &= ~BIT(CW1200_LINK_ID_AFTER_DTIM); + } else { + tx_allowed_mask |= BIT(CW1200_LINK_ID_AFTER_DTIM); + } + idx = cw1200_get_prio_queue(priv, + tx_allowed_mask, &total); + if (idx < 0) + return -ENOENT; + +found: + *queue_p = &priv->tx_queue[idx]; + *tx_allowed_mask_p = tx_allowed_mask; + return 0; +} + +int wsm_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst) +{ + struct wsm_tx *wsm = NULL; + struct ieee80211_tx_info *tx_info; + struct cw1200_queue *queue = NULL; + int queue_num; + u32 tx_allowed_mask = 0; + const struct cw1200_txpriv *txpriv = NULL; + int count = 0; + + /* More is used only for broadcasts. */ + bool more = false; + +#ifdef CONFIG_CW1200_ITP + count = cw1200_itp_get_tx(priv, data, tx_len, burst); + if (count) + return count; +#endif + + if (priv->wsm_cmd.ptr) { /* CMD request */ + ++count; + spin_lock(&priv->wsm_cmd.lock); + BUG_ON(!priv->wsm_cmd.ptr); + *data = priv->wsm_cmd.ptr; + *tx_len = priv->wsm_cmd.len; + *burst = 1; + spin_unlock(&priv->wsm_cmd.lock); + } else { + for (;;) { + int ret; + + if (atomic_add_return(0, &priv->tx_lock)) + break; + + spin_lock_bh(&priv->ps_state_lock); + + ret = wsm_get_tx_queue_and_mask(priv, &queue, + &tx_allowed_mask, &more); + queue_num = queue - priv->tx_queue; + + if (priv->buffered_multicasts && + (ret || !more) && + (priv->tx_multicast || !priv->sta_asleep_mask)) { + priv->buffered_multicasts = false; + if (priv->tx_multicast) { + priv->tx_multicast = false; + queue_work(priv->workqueue, + &priv->multicast_stop_work); + } + } + + spin_unlock_bh(&priv->ps_state_lock); + + if (ret) + break; + + if (cw1200_queue_get(queue, + tx_allowed_mask, + &wsm, &tx_info, &txpriv)) + continue; + + if (wsm_handle_tx_data(priv, wsm, + tx_info, txpriv, queue)) + continue; /* Handled by WSM */ + + wsm->hdr.id &= __cpu_to_le16( + ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX)); + wsm->hdr.id |= cpu_to_le16( + WSM_TX_LINK_ID(txpriv->raw_link_id)); + priv->pspoll_mask &= ~BIT(txpriv->raw_link_id); + + *data = (u8 *)wsm; + *tx_len = __le16_to_cpu(wsm->hdr.len); + + /* allow bursting if txop is set */ + if (priv->edca.params[queue_num].txop_limit) + *burst = min(*burst, + (int)cw1200_queue_get_num_queued(queue, tx_allowed_mask) + 1); + else + *burst = 1; + + /* store index of bursting queue */ + if (*burst > 1) + priv->tx_burst_idx = queue_num; + else + priv->tx_burst_idx = -1; + + if (more) { + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) + &((u8 *)wsm)[txpriv->offset]; + /* more buffered multicast/broadcast frames + * ==> set MoreData flag in IEEE 802.11 header + * to inform PS STAs + */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } + + pr_debug("[WSM] >>> 0x%.4X (%zu) %p %c\n", + 0x0004, *tx_len, *data, + wsm->more ? 'M' : ' '); + ++count; + break; + } + } + + return count; +} + +void wsm_txed(struct cw1200_common *priv, u8 *data) +{ + if (data == priv->wsm_cmd.ptr) { + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.ptr = NULL; + spin_unlock(&priv->wsm_cmd.lock); + } +} + +/* ******************************************************************** */ +/* WSM buffer */ + +void wsm_buf_init(struct wsm_buf *buf) +{ + BUG_ON(buf->begin); + buf->begin = kmalloc(FWLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); + buf->end = buf->begin ? 
&buf->begin[FWLOAD_BLOCK_SIZE] : buf->begin; + wsm_buf_reset(buf); +} + +void wsm_buf_deinit(struct wsm_buf *buf) +{ + kfree(buf->begin); + buf->begin = buf->data = buf->end = NULL; +} + +static void wsm_buf_reset(struct wsm_buf *buf) +{ + if (buf->begin) { + buf->data = &buf->begin[4]; + *(u32 *)buf->begin = 0; + } else { + buf->data = buf->begin; + } +} + +static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size) +{ + size_t pos = buf->data - buf->begin; + size_t size = pos + extra_size; + + size = round_up(size, FWLOAD_BLOCK_SIZE); + + buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA); + if (buf->begin) { + buf->data = &buf->begin[pos]; + buf->end = &buf->begin[size]; + return 0; + } else { + buf->end = buf->data = buf->begin; + return -ENOMEM; + } +} diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h new file mode 100644 index 000000000000..8d902d6a7dc4 --- /dev/null +++ b/drivers/net/wireless/cw1200/wsm.h @@ -0,0 +1,1879 @@ +/* + * WSM host interface (HI) interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin + * + * Based on CW1200 UMAC WSM API, which is + * Copyright (C) ST-Ericsson SA 2010 + * Author: Stewart Mathers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_WSM_H_INCLUDED +#define CW1200_WSM_H_INCLUDED + +#include + +struct cw1200_common; + +/* Bands */ +/* Radio band 2.412 -2.484 GHz. */ +#define WSM_PHY_BAND_2_4G (0) + +/* Radio band 4.9375-5.8250 GHz. */ +#define WSM_PHY_BAND_5G (1) + +/* Transmit rates */ +/* 1 Mbps ERP-DSSS */ +#define WSM_TRANSMIT_RATE_1 (0) + +/* 2 Mbps ERP-DSSS */ +#define WSM_TRANSMIT_RATE_2 (1) + +/* 5.5 Mbps ERP-CCK */ +#define WSM_TRANSMIT_RATE_5 (2) + +/* 11 Mbps ERP-CCK */ +#define WSM_TRANSMIT_RATE_11 (3) + +/* 22 Mbps ERP-PBCC (Not supported) */ +/* #define WSM_TRANSMIT_RATE_22 (4) */ + +/* 33 Mbps ERP-PBCC (Not supported) */ +/* #define WSM_TRANSMIT_RATE_33 (5) */ + +/* 6 Mbps (3 Mbps) ERP-OFDM, BPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_6 (6) + +/* 9 Mbps (4.5 Mbps) ERP-OFDM, BPSK coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_9 (7) + +/* 12 Mbps (6 Mbps) ERP-OFDM, QPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_12 (8) + +/* 18 Mbps (9 Mbps) ERP-OFDM, QPSK coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_18 (9) + +/* 24 Mbps (12 Mbps) ERP-OFDM, 16QAM coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_24 (10) + +/* 36 Mbps (18 Mbps) ERP-OFDM, 16QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_36 (11) + +/* 48 Mbps (24 Mbps) ERP-OFDM, 64QAM coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_48 (12) + +/* 54 Mbps (27 Mbps) ERP-OFDM, 64QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_54 (13) + +/* 6.5 Mbps HT-OFDM, BPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_HT_6 (14) + +/* 13 Mbps HT-OFDM, QPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_HT_13 (15) + +/* 19.5 Mbps HT-OFDM, QPSK coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_HT_19 (16) + +/* 26 Mbps HT-OFDM, 16QAM coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_HT_26 (17) + +/* 39 Mbps HT-OFDM, 16QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_HT_39 (18) + +/* 52 Mbps HT-OFDM, 64QAM coding rate 2/3 */ +#define WSM_TRANSMIT_RATE_HT_52 (19) + +/* 58.5 Mbps HT-OFDM, 64QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_HT_58 (20) + +/* 65 Mbps HT-OFDM, 64QAM coding rate 5/6 */ +#define WSM_TRANSMIT_RATE_HT_65 
(21) + +/* Scan types */ +/* Foreground scan */ +#define WSM_SCAN_TYPE_FOREGROUND (0) + +/* Background scan */ +#define WSM_SCAN_TYPE_BACKGROUND (1) + +/* Auto scan */ +#define WSM_SCAN_TYPE_AUTO (2) + +/* Scan flags */ +/* Forced background scan means if the station cannot */ +/* enter the power-save mode, it shall force to perform a */ +/* background scan. Only valid when ScanType is */ +/* background scan. */ +#define WSM_SCAN_FLAG_FORCE_BACKGROUND (BIT(0)) + +/* The WLAN device scans one channel at a time so */ +/* that disturbance to the data traffic is minimized. */ +#define WSM_SCAN_FLAG_SPLIT_METHOD (BIT(1)) + +/* Preamble Type. Long if not set. */ +#define WSM_SCAN_FLAG_SHORT_PREAMBLE (BIT(2)) + +/* 11n Tx Mode. Mixed if not set. */ +#define WSM_SCAN_FLAG_11N_GREENFIELD (BIT(3)) + +/* Scan constraints */ +/* Maximum number of channels to be scanned. */ +#define WSM_SCAN_MAX_NUM_OF_CHANNELS (48) + +/* The maximum number of SSIDs that the device can scan for. */ +#define WSM_SCAN_MAX_NUM_OF_SSIDS (2) + +/* Power management modes */ +/* 802.11 Active mode */ +#define WSM_PSM_ACTIVE (0) + +/* 802.11 PS mode */ +#define WSM_PSM_PS BIT(0) + +/* Fast Power Save bit */ +#define WSM_PSM_FAST_PS_FLAG BIT(7) + +/* Dynamic aka Fast power save */ +#define WSM_PSM_FAST_PS (BIT(0) | BIT(7)) + +/* Undetermined */ +/* Note : Undetermined status is reported when the */ +/* NULL data frame used to advertise the PM mode to */ +/* the AP at Pre or Post Background Scan is not Acknowledged */ +#define WSM_PSM_UNKNOWN BIT(1) + +/* Queue IDs */ +/* best effort/legacy */ +#define WSM_QUEUE_BEST_EFFORT (0) + +/* background */ +#define WSM_QUEUE_BACKGROUND (1) + +/* video */ +#define WSM_QUEUE_VIDEO (2) + +/* voice */ +#define WSM_QUEUE_VOICE (3) + +/* HT TX parameters */ +/* Non-HT */ +#define WSM_HT_TX_NON_HT (0) + +/* Mixed format */ +#define WSM_HT_TX_MIXED (1) + +/* Greenfield format */ +#define WSM_HT_TX_GREENFIELD (2) + +/* STBC allowed */ +#define WSM_HT_TX_STBC (BIT(7)) + +/* EPTA priority flags for BT Coex */ +/* default epta priority */ +#define WSM_EPTA_PRIORITY_DEFAULT 4 +/* use for normal data */ +#define WSM_EPTA_PRIORITY_DATA 4 +/* use for connect/disconnect/roaming*/ +#define WSM_EPTA_PRIORITY_MGT 5 +/* use for action frames */ +#define WSM_EPTA_PRIORITY_ACTION 5 +/* use for AC_VI data */ +#define WSM_EPTA_PRIORITY_VIDEO 5 +/* use for AC_VO data */ +#define WSM_EPTA_PRIORITY_VOICE 6 +/* use for EAPOL exchange */ +#define WSM_EPTA_PRIORITY_EAPOL 7 + +/* TX status */ +/* Frame was sent aggregated */ +/* Only valid for WSM_SUCCESS status. */ +#define WSM_TX_STATUS_AGGREGATION (BIT(0)) + +/* Host should requeue this frame later. */ +/* Valid only when status is WSM_REQUEUE. */ +#define WSM_TX_STATUS_REQUEUE (BIT(1)) + +/* Normal Ack */ +#define WSM_TX_STATUS_NORMAL_ACK (0<<2) + +/* No Ack */ +#define WSM_TX_STATUS_NO_ACK (1<<2) + +/* No explicit acknowledgement */ +#define WSM_TX_STATUS_NO_EXPLICIT_ACK (2<<2) + +/* Block Ack */ +/* Only valid for WSM_SUCCESS status. */ +#define WSM_TX_STATUS_BLOCK_ACK (3<<2) + +/* RX status */ +/* Unencrypted */ +#define WSM_RX_STATUS_UNENCRYPTED (0<<0) + +/* WEP */ +#define WSM_RX_STATUS_WEP (1<<0) + +/* TKIP */ +#define WSM_RX_STATUS_TKIP (2<<0) + +/* AES */ +#define WSM_RX_STATUS_AES (3<<0) + +/* WAPI */ +#define WSM_RX_STATUS_WAPI (4<<0) + +/* Macro to fetch encryption subfield.
*/ +#define WSM_RX_STATUS_ENCRYPTION(status) ((status) & 0x07) + +/* Frame was part of an aggregation */ +#define WSM_RX_STATUS_AGGREGATE (BIT(3)) + +/* Frame was first in the aggregation */ +#define WSM_RX_STATUS_AGGREGATE_FIRST (BIT(4)) + +/* Frame was last in the aggregation */ +#define WSM_RX_STATUS_AGGREGATE_LAST (BIT(5)) + +/* Indicates a defragmented frame */ +#define WSM_RX_STATUS_DEFRAGMENTED (BIT(6)) + +/* Indicates a Beacon frame */ +#define WSM_RX_STATUS_BEACON (BIT(7)) + +/* Indicates STA bit beacon TIM field */ +#define WSM_RX_STATUS_TIM (BIT(8)) + +/* Indicates Beacon frame's virtual bitmap contains multicast bit */ +#define WSM_RX_STATUS_MULTICAST (BIT(9)) + +/* Indicates frame contains a matching SSID */ +#define WSM_RX_STATUS_MATCHING_SSID (BIT(10)) + +/* Indicates frame contains a matching BSSI */ +#define WSM_RX_STATUS_MATCHING_BSSI (BIT(11)) + +/* Indicates More bit set in Framectl field */ +#define WSM_RX_STATUS_MORE_DATA (BIT(12)) + +/* Indicates frame received during a measurement process */ +#define WSM_RX_STATUS_MEASUREMENT (BIT(13)) + +/* Indicates frame received as an HT packet */ +#define WSM_RX_STATUS_HT (BIT(14)) + +/* Indicates frame received with STBC */ +#define WSM_RX_STATUS_STBC (BIT(15)) + +/* Indicates Address 1 field matches dot11StationId */ +#define WSM_RX_STATUS_ADDRESS1 (BIT(16)) + +/* Indicates Group address present in the Address 1 field */ +#define WSM_RX_STATUS_GROUP (BIT(17)) + +/* Indicates Broadcast address present in the Address 1 field */ +#define WSM_RX_STATUS_BROADCAST (BIT(18)) + +/* Indicates group key used with encrypted frames */ +#define WSM_RX_STATUS_GROUP_KEY (BIT(19)) + +/* Macro to fetch encryption key index. */ +#define WSM_RX_STATUS_KEY_IDX(status) (((status >> 20)) & 0x0F) + +/* Indicates TSF inclusion after 802.11 frame body */ +#define WSM_RX_STATUS_TSF_INCLUDED (BIT(24)) + +/* Frame Control field starts at Frame offset + 2 */ +#define WSM_TX_2BYTES_SHIFT (BIT(7)) + +/* Join mode */ +/* IBSS */ +#define WSM_JOIN_MODE_IBSS (0) + +/* BSS */ +#define WSM_JOIN_MODE_BSS (1) + +/* PLCP preamble type */ +/* For long preamble */ +#define WSM_JOIN_PREAMBLE_LONG (0) + +/* For short preamble (Long for 1Mbps) */ +#define WSM_JOIN_PREAMBLE_SHORT (1) + +/* For short preamble (Long for 1 and 2Mbps) */ +#define WSM_JOIN_PREAMBLE_SHORT_2 (2) + +/* Join flags */ +/* Unsynchronized */ +#define WSM_JOIN_FLAGS_UNSYNCRONIZED BIT(0) +/* The BSS owner is a P2P GO */ +#define WSM_JOIN_FLAGS_P2P_GO BIT(1) +/* Force to join BSS with the BSSID and the + * SSID specified without waiting for beacons. The + * ProbeForJoin parameter is ignored. 
*/ +#define WSM_JOIN_FLAGS_FORCE BIT(2) +/* Give probe request/response higher + * priority over the BT traffic */ +#define WSM_JOIN_FLAGS_PRIO BIT(3) +/* Issue immediate join confirmation and use + * join complete to notify about completion */ +#define WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND BIT(5) + +/* Key types */ +#define WSM_KEY_TYPE_WEP_DEFAULT (0) +#define WSM_KEY_TYPE_WEP_PAIRWISE (1) +#define WSM_KEY_TYPE_TKIP_GROUP (2) +#define WSM_KEY_TYPE_TKIP_PAIRWISE (3) +#define WSM_KEY_TYPE_AES_GROUP (4) +#define WSM_KEY_TYPE_AES_PAIRWISE (5) +#define WSM_KEY_TYPE_WAPI_GROUP (6) +#define WSM_KEY_TYPE_WAPI_PAIRWISE (7) + +/* Key indexes */ +#define WSM_KEY_MAX_INDEX (10) + +/* ACK policy */ +#define WSM_ACK_POLICY_NORMAL (0) +#define WSM_ACK_POLICY_NO_ACK (1) + +/* Start modes */ +#define WSM_START_MODE_AP (0) /* Mini AP */ +#define WSM_START_MODE_P2P_GO (1) /* P2P GO */ +#define WSM_START_MODE_P2P_DEV (2) /* P2P device */ + +/* SetAssociationMode MIB flags */ +#define WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE (BIT(0)) +#define WSM_ASSOCIATION_MODE_USE_HT_MODE (BIT(1)) +#define WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET (BIT(2)) +#define WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING (BIT(3)) +#define WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES (BIT(4)) + +/* RcpiRssiThreshold MIB flags */ +#define WSM_RCPI_RSSI_THRESHOLD_ENABLE (BIT(0)) +#define WSM_RCPI_RSSI_USE_RSSI (BIT(1)) +#define WSM_RCPI_RSSI_DONT_USE_UPPER (BIT(2)) +#define WSM_RCPI_RSSI_DONT_USE_LOWER (BIT(3)) + +/* Update-ie constants */ +#define WSM_UPDATE_IE_BEACON (BIT(0)) +#define WSM_UPDATE_IE_PROBE_RESP (BIT(1)) +#define WSM_UPDATE_IE_PROBE_REQ (BIT(2)) + +/* WSM events */ +/* Error */ +#define WSM_EVENT_ERROR (0) + +/* BSS lost */ +#define WSM_EVENT_BSS_LOST (1) + +/* BSS regained */ +#define WSM_EVENT_BSS_REGAINED (2) + +/* Radar detected */ +#define WSM_EVENT_RADAR_DETECTED (3) + +/* RCPI or RSSI threshold triggered */ +#define WSM_EVENT_RCPI_RSSI (4) + +/* BT inactive */ +#define WSM_EVENT_BT_INACTIVE (5) + +/* BT active */ +#define WSM_EVENT_BT_ACTIVE (6) + +/* MIB IDs */ +/* 4.1 dot11StationId */ +#define WSM_MIB_ID_DOT11_STATION_ID 0x0000 + +/* 4.2 dot11MaxtransmitMsduLifeTime */ +#define WSM_MIB_ID_DOT11_MAX_TRANSMIT_LIFTIME 0x0001 + +/* 4.3 dot11MaxReceiveLifeTime */ +#define WSM_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME 0x0002 + +/* 4.4 dot11SlotTime */ +#define WSM_MIB_ID_DOT11_SLOT_TIME 0x0003 + +/* 4.5 dot11GroupAddressesTable */ +#define WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE 0x0004 +#define WSM_MAX_GRP_ADDRTABLE_ENTRIES 8 + +/* 4.6 dot11WepDefaultKeyId */ +#define WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID 0x0005 + +/* 4.7 dot11CurrentTxPowerLevel */ +#define WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL 0x0006 + +/* 4.8 dot11RTSThreshold */ +#define WSM_MIB_ID_DOT11_RTS_THRESHOLD 0x0007 + +/* 4.9 NonErpProtection */ +#define WSM_MIB_ID_NON_ERP_PROTECTION 0x1000 + +/* 4.10 ArpIpAddressesTable */ +#define WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE 0x1001 +#define WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES 1 + +/* 4.11 TemplateFrame */ +#define WSM_MIB_ID_TEMPLATE_FRAME 0x1002 + +/* 4.12 RxFilter */ +#define WSM_MIB_ID_RX_FILTER 0x1003 + +/* 4.13 BeaconFilterTable */ +#define WSM_MIB_ID_BEACON_FILTER_TABLE 0x1004 + +/* 4.14 BeaconFilterEnable */ +#define WSM_MIB_ID_BEACON_FILTER_ENABLE 0x1005 + +/* 4.15 OperationalPowerMode */ +#define WSM_MIB_ID_OPERATIONAL_POWER_MODE 0x1006 + +/* 4.16 BeaconWakeUpPeriod */ +#define WSM_MIB_ID_BEACON_WAKEUP_PERIOD 0x1007 + +/* 4.17 RcpiRssiThreshold */ +#define WSM_MIB_ID_RCPI_RSSI_THRESHOLD 0x1009 + +/* 4.18 StatisticsTable */ 
+#define WSM_MIB_ID_STATISTICS_TABLE 0x100A + +/* 4.19 IbssPsConfig */ +#define WSM_MIB_ID_IBSS_PS_CONFIG 0x100B + +/* 4.20 CountersTable */ +#define WSM_MIB_ID_COUNTERS_TABLE 0x100C + +/* 4.21 BlockAckPolicy */ +#define WSM_MIB_ID_BLOCK_ACK_POLICY 0x100E + +/* 4.22 OverrideInternalTxRate */ +#define WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE 0x100F + +/* 4.23 SetAssociationMode */ +#define WSM_MIB_ID_SET_ASSOCIATION_MODE 0x1010 + +/* 4.24 UpdateEptaConfigData */ +#define WSM_MIB_ID_UPDATE_EPTA_CONFIG_DATA 0x1011 + +/* 4.25 SelectCcaMethod */ +#define WSM_MIB_ID_SELECT_CCA_METHOD 0x1012 + +/* 4.26 SetUpasdInformation */ +#define WSM_MIB_ID_SET_UAPSD_INFORMATION 0x1013 + +/* 4.27 SetAutoCalibrationMode WBF00004073 */ +#define WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE 0x1015 + +/* 4.28 SetTxRateRetryPolicy */ +#define WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY 0x1016 + +/* 4.29 SetHostMessageTypeFilter */ +#define WSM_MIB_ID_SET_HOST_MSG_TYPE_FILTER 0x1017 + +/* 4.30 P2PFindInfo */ +#define WSM_MIB_ID_P2P_FIND_INFO 0x1018 + +/* 4.31 P2PPsModeInfo */ +#define WSM_MIB_ID_P2P_PS_MODE_INFO 0x1019 + +/* 4.32 SetEtherTypeDataFrameFilter */ +#define WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER 0x101A + +/* 4.33 SetUDPPortDataFrameFilter */ +#define WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER 0x101B + +/* 4.34 SetMagicDataFrameFilter */ +#define WSM_MIB_ID_SET_MAGIC_DATAFRAME_FILTER 0x101C + +/* 4.35 P2PDeviceInfo */ +#define WSM_MIB_ID_P2P_DEVICE_INFO 0x101D + +/* 4.36 SetWCDMABand */ +#define WSM_MIB_ID_SET_WCDMA_BAND 0x101E + +/* 4.37 GroupTxSequenceCounter */ +#define WSM_MIB_ID_GRP_SEQ_COUNTER 0x101F + +/* 4.38 ProtectedMgmtPolicy */ +#define WSM_MIB_ID_PROTECTED_MGMT_POLICY 0x1020 + +/* 4.39 SetHtProtection */ +#define WSM_MIB_ID_SET_HT_PROTECTION 0x1021 + +/* 4.40 GPIO Command */ +#define WSM_MIB_ID_GPIO_COMMAND 0x1022 + +/* 4.41 TSF Counter Value */ +#define WSM_MIB_ID_TSF_COUNTER 0x1023 + +/* Test Purposes Only */ +#define WSM_MIB_ID_BLOCK_ACK_INFO 0x100D + +/* 4.42 UseMultiTxConfMessage */ +#define WSM_MIB_USE_MULTI_TX_CONF 0x1024 + +/* 4.43 Keep-alive period */ +#define WSM_MIB_ID_KEEP_ALIVE_PERIOD 0x1025 + +/* 4.44 Disable BSSID filter */ +#define WSM_MIB_ID_DISABLE_BSSID_FILTER 0x1026 + +/* Frame template types */ +#define WSM_FRAME_TYPE_PROBE_REQUEST (0) +#define WSM_FRAME_TYPE_BEACON (1) +#define WSM_FRAME_TYPE_NULL (2) +#define WSM_FRAME_TYPE_QOS_NULL (3) +#define WSM_FRAME_TYPE_PS_POLL (4) +#define WSM_FRAME_TYPE_PROBE_RESPONSE (5) + +#define WSM_FRAME_GREENFIELD (0x80) /* See 4.11 */ + +/* Status */ +/* The WSM firmware has completed a request */ +/* successfully. */ +#define WSM_STATUS_SUCCESS (0) + +/* This is a generic failure code if other error codes do */ +/* not apply. */ +#define WSM_STATUS_FAILURE (1) + +/* A request contains one or more invalid parameters. */ +#define WSM_INVALID_PARAMETER (2) + +/* The request cannot perform because the device is in */ +/* an inappropriate mode. */ +#define WSM_ACCESS_DENIED (3) + +/* The frame received includes a decryption error. */ +#define WSM_STATUS_DECRYPTFAILURE (4) + +/* A MIC failure is detected in the received packets. */ +#define WSM_STATUS_MICFAILURE (5) + +/* The transmit request failed due to retry limit being */ +/* exceeded. */ +#define WSM_STATUS_RETRY_EXCEEDED (6) + +/* The transmit request failed due to MSDU life time */ +/* being exceeded. */ +#define WSM_STATUS_TX_LIFETIME_EXCEEDED (7) + +/* The link to the AP is lost. 
*/ +#define WSM_STATUS_LINK_LOST (8) + +/* No key was found for the encrypted frame */ +#define WSM_STATUS_NO_KEY_FOUND (9) + +/* Jammer was detected when transmitting this frame */ +#define WSM_STATUS_JAMMER_DETECTED (10) + +/* The message should be requeued later. */ +/* This is applicable only to Transmit */ +#define WSM_REQUEUE (11) + +/* Advanced filtering options */ +#define WSM_MAX_FILTER_ELEMENTS (4) + +#define WSM_FILTER_ACTION_IGNORE (0) +#define WSM_FILTER_ACTION_FILTER_IN (1) +#define WSM_FILTER_ACTION_FILTER_OUT (2) + +#define WSM_FILTER_PORT_TYPE_DST (0) +#define WSM_FILTER_PORT_TYPE_SRC (1) + +/* Actual header of WSM messages */ +struct wsm_hdr { + __le16 len; + __le16 id; +}; + +#define WSM_TX_SEQ_MAX (7) +#define WSM_TX_SEQ(seq) \ + ((seq & WSM_TX_SEQ_MAX) << 13) +#define WSM_TX_LINK_ID_MAX (0x0F) +#define WSM_TX_LINK_ID(link_id) \ + ((link_id & WSM_TX_LINK_ID_MAX) << 6) + +#define MAX_BEACON_SKIP_TIME_MS 1000 + +#define WSM_CMD_LAST_CHANCE_TIMEOUT (HZ * 3 / 2) + +/* ******************************************************************** */ +/* WSM capability */ + +#define WSM_STARTUP_IND_ID 0x0801 + +struct wsm_startup_ind { + u16 input_buffers; + u16 input_buffer_size; + u16 status; + u16 hw_id; + u16 hw_subid; + u16 fw_cap; + u16 fw_type; + u16 fw_api; + u16 fw_build; + u16 fw_ver; + char fw_label[128]; + u32 config[4]; +}; + +/* ******************************************************************** */ +/* WSM commands */ + +/* 3.1 */ +#define WSM_CONFIGURATION_REQ_ID 0x0009 +#define WSM_CONFIGURATION_RESP_ID 0x0409 + +struct wsm_tx_power_range { + int min_power_level; + int max_power_level; + u32 stepping; +}; + +struct wsm_configuration { + /* [in] */ u32 dot11MaxTransmitMsduLifeTime; + /* [in] */ u32 dot11MaxReceiveLifeTime; + /* [in] */ u32 dot11RtsThreshold; + /* [in, out] */ u8 *dot11StationId; + /* [in] */ const void *dpdData; + /* [in] */ size_t dpdData_size; + /* [out] */ u8 dot11FrequencyBandsSupported; + /* [out] */ u32 supportedRateMask; + /* [out] */ struct wsm_tx_power_range txPowerRange[2]; +}; + +int wsm_configuration(struct cw1200_common *priv, + struct wsm_configuration *arg); + +/* 3.3 */ +#define WSM_RESET_REQ_ID 0x000A +#define WSM_RESET_RESP_ID 0x040A +struct wsm_reset { + /* [in] */ int link_id; + /* [in] */ bool reset_statistics; +}; + +int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg); + +/* 3.5 */ +#define WSM_READ_MIB_REQ_ID 0x0005 +#define WSM_READ_MIB_RESP_ID 0x0405 +int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *buf, + size_t buf_size); + +/* 3.7 */ +#define WSM_WRITE_MIB_REQ_ID 0x0006 +#define WSM_WRITE_MIB_RESP_ID 0x0406 +int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *buf, + size_t buf_size); + +/* 3.9 */ +#define WSM_START_SCAN_REQ_ID 0x0007 +#define WSM_START_SCAN_RESP_ID 0x0407 + +struct wsm_ssid { + u8 ssid[32]; + u32 length; +}; + +struct wsm_scan_ch { + u16 number; + u32 min_chan_time; + u32 max_chan_time; + u32 tx_power_level; +}; + +struct wsm_scan { + /* WSM_PHY_BAND_... */ + u8 band; + + /* WSM_SCAN_TYPE_... */ + u8 type; + + /* WSM_SCAN_FLAG_... */ + u8 flags; + + /* WSM_TRANSMIT_RATE_... */ + u8 max_tx_rate; + + /* Interval period in TUs that the device shall the re- */ + /* execute the requested scan. Max value supported by the device */ + /* is 256s. */ + u32 auto_scan_interval; + + /* Number of probe requests (per SSID) sent to one (1) */ + /* channel. Zero (0) means that none is send, which */ + /* means that a passive scan is to be done. 
Value */ + /* greater than zero (0) means that an active scan is to */ + /* be done. */ + u32 num_probes; + + /* Number of channels to be scanned. */ + /* Maximum value is WSM_SCAN_MAX_NUM_OF_CHANNELS. */ + u8 num_channels; + + /* Number of SSID provided in the scan command (this */ + /* is zero (0) in broadcast scan) */ + /* The maximum number of SSIDs is WSM_SCAN_MAX_NUM_OF_SSIDS. */ + u8 num_ssids; + + /* The delay time (in microseconds) period */ + /* before sending a probe-request. */ + u8 probe_delay; + + /* SSIDs to be scanned [numOfSSIDs]; */ + struct wsm_ssid *ssids; + + /* Channels to be scanned [numOfChannels]; */ + struct wsm_scan_ch *ch; +}; + +int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg); + +/* 3.11 */ +#define WSM_STOP_SCAN_REQ_ID 0x0008 +#define WSM_STOP_SCAN_RESP_ID 0x0408 +int wsm_stop_scan(struct cw1200_common *priv); + +/* 3.13 */ +#define WSM_SCAN_COMPLETE_IND_ID 0x0806 +struct wsm_scan_complete { + /* WSM_STATUS_... */ + u32 status; + + /* WSM_PSM_... */ + u8 psm; + + /* Number of channels that the scan operation completed. */ + u8 num_channels; +}; + +/* 3.14 */ +#define WSM_TX_CONFIRM_IND_ID 0x0404 +#define WSM_MULTI_TX_CONFIRM_ID 0x041E + +struct wsm_tx_confirm { + /* Packet identifier used in wsm_tx. */ + u32 packet_id; + + /* WSM_STATUS_... */ + u32 status; + + /* WSM_TRANSMIT_RATE_... */ + u8 tx_rate; + + /* The number of times the frame was transmitted */ + /* without receiving an acknowledgement. */ + u8 ack_failures; + + /* WSM_TX_STATUS_... */ + u16 flags; + + /* The total time in microseconds that the frame spent in */ + /* the WLAN device before transmission as completed. */ + u32 media_delay; + + /* The total time in microseconds that the frame spent in */ + /* the WLAN device before transmission was started. */ + u32 tx_queue_delay; +}; + +/* 3.15 */ +typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv, + struct wsm_tx_confirm *arg); + +/* Note that ideology of wsm_tx struct is different against the rest of + * WSM API. wsm_hdr is /not/ a caller-adapted struct to be used as an input + * argument for WSM call, but a prepared bytestream to be sent to firmware. + * It is filled partly in cw1200_tx, partly in low-level WSM code. + * Please pay attention once again: ideology is different. + * + * Legend: + * - [in]: cw1200_tx must fill this field. + * - [wsm]: the field is filled by low-level WSM. + */ +struct wsm_tx { + /* common WSM header */ + struct wsm_hdr hdr; + + /* Packet identifier that meant to be used in completion. */ + __le32 packet_id; + + /* WSM_TRANSMIT_RATE_... */ + u8 max_tx_rate; + + /* WSM_QUEUE_... */ + u8 queue_id; + + /* True: another packet is pending on the host for transmission. */ + u8 more; + + /* Bit 0 = 0 - Start expiry time from first Tx attempt (default) */ + /* Bit 0 = 1 - Start expiry time from receipt of Tx Request */ + /* Bits 3:1 - PTA Priority */ + /* Bits 6:4 - Tx Rate Retry Policy */ + /* Bit 7 - Reserved */ + u8 flags; + + /* Should be 0. */ + __le32 reserved; + + /* The elapsed time in TUs, after the initial transmission */ + /* of an MSDU, after which further attempts to transmit */ + /* the MSDU shall be terminated. Overrides the global */ + /* dot11MaxTransmitMsduLifeTime setting [optional] */ + /* Device will set the default value if this is 0. */ + __le32 expire_time; + + /* WSM_HT_TX_... 
*/ + __le32 ht_tx_parameters; +}; + +/* = sizeof(generic hi hdr) + sizeof(wsm hdr) + sizeof(alignment) */ +#define WSM_TX_EXTRA_HEADROOM (28) + +/* 3.16 */ +#define WSM_RECEIVE_IND_ID 0x0804 + +struct wsm_rx { + /* WSM_STATUS_... */ + __le32 status; + + /* Specifies the channel of the received packet. */ + __le16 channel_number; + + /* WSM_TRANSMIT_RATE_... */ + u8 rx_rate; + + /* This value is expressed in signed Q8.0 format for */ + /* RSSI and unsigned Q7.1 format for RCPI. */ + u8 rcpi_rssi; + + /* WSM_RX_STATUS_... */ + __le32 flags; + + /* Payload */ + u8 data[0]; +} __packed; + +/* = sizeof(generic hi hdr) + sizeof(wsm hdr) */ +#define WSM_RX_EXTRA_HEADROOM (16) + +typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg, + struct sk_buff **skb_p); + +/* 3.17 */ +struct wsm_event { + /* WSM_STATUS_... */ + /* [out] */ u32 id; + + /* Indication parameters. */ + /* For error indication, this shall be a 32-bit WSM status. */ + /* For RCPI or RSSI indication, this should be an 8-bit */ + /* RCPI or RSSI value. */ + /* [out] */ u32 data; +}; + +struct cw1200_wsm_event { + struct list_head link; + struct wsm_event evt; +}; + +/* 3.18 - 3.22 */ +/* Measurement. Skipped for now. Irrelevent. */ + +typedef void (*wsm_event_cb) (struct cw1200_common *priv, + struct wsm_event *arg); + +/* 3.23 */ +#define WSM_JOIN_REQ_ID 0x000B +#define WSM_JOIN_RESP_ID 0x040B + +struct wsm_join { + /* WSM_JOIN_MODE_... */ + u8 mode; + + /* WSM_PHY_BAND_... */ + u8 band; + + /* Specifies the channel number to join. The channel */ + /* number will be mapped to an actual frequency */ + /* according to the band */ + u16 channel_number; + + /* Specifies the BSSID of the BSS or IBSS to be joined */ + /* or the IBSS to be started. */ + u8 bssid[6]; + + /* ATIM window of IBSS */ + /* When ATIM window is zero the initiated IBSS does */ + /* not support power saving. */ + u16 atim_window; + + /* WSM_JOIN_PREAMBLE_... */ + u8 preamble_type; + + /* Specifies if a probe request should be send with the */ + /* specified SSID when joining to the network. */ + u8 probe_for_join; + + /* DTIM Period (In multiples of beacon interval) */ + u8 dtim_period; + + /* WSM_JOIN_FLAGS_... */ + u8 flags; + + /* Length of the SSID */ + u32 ssid_len; + + /* Specifies the SSID of the IBSS to join or start */ + u8 ssid[32]; + + /* Specifies the time between TBTTs in TUs */ + u32 beacon_interval; + + /* A bit mask that defines the BSS basic rate set. */ + u32 basic_rate_set; +}; + +struct wsm_join_cnf { + u32 status; + + /* Minimum transmission power level in units of 0.1dBm */ + u32 min_power_level; + + /* Maximum transmission power level in units of 0.1dBm */ + u32 max_power_level; +}; + +int wsm_join(struct cw1200_common *priv, struct wsm_join *arg); + +/* 3.24 */ +struct wsm_join_complete { + /* WSM_STATUS_... */ + u32 status; +}; + +/* 3.25 */ +#define WSM_SET_PM_REQ_ID 0x0010 +#define WSM_SET_PM_RESP_ID 0x0410 +struct wsm_set_pm { + /* WSM_PSM_... */ + u8 mode; + + /* in unit of 500us; 0 to use default */ + u8 fast_psm_idle_period; + + /* in unit of 500us; 0 to use default */ + u8 ap_psm_change_period; + + /* in unit of 500us; 0 to disable auto-pspoll */ + u8 min_auto_pspoll_period; +}; + +int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg); + +/* 3.27 */ +struct wsm_set_pm_complete { + u8 psm; /* WSM_PSM_... 
*/ +}; + +/* 3.28 */ +#define WSM_SET_BSS_PARAMS_REQ_ID 0x0011 +#define WSM_SET_BSS_PARAMS_RESP_ID 0x0411 +struct wsm_set_bss_params { + /* This resets the beacon loss counters only */ + u8 reset_beacon_loss; + + /* The number of lost consecutive beacons after which */ + /* the WLAN device should indicate the BSS-Lost event */ + /* to the WLAN host driver. */ + u8 beacon_lost_count; + + /* The AID received during the association process. */ + u16 aid; + + /* The operational rate set mask */ + u32 operational_rate_set; +}; + +int wsm_set_bss_params(struct cw1200_common *priv, + const struct wsm_set_bss_params *arg); + +/* 3.30 */ +#define WSM_ADD_KEY_REQ_ID 0x000C +#define WSM_ADD_KEY_RESP_ID 0x040C +struct wsm_add_key { + u8 type; /* WSM_KEY_TYPE_... */ + u8 index; /* Key entry index: 0 -- WSM_KEY_MAX_INDEX */ + u16 reserved; + union { + struct { + u8 peer[6]; /* MAC address of the + * peer station */ + u8 reserved; + u8 keylen; /* Key length in bytes */ + u8 keydata[16]; /* Key data */ + } __packed wep_pairwise; + struct { + u8 keyid; /* Unique per key identifier + * (0..3) */ + u8 keylen; /* Key length in bytes */ + u16 reserved; + u8 keydata[16]; /* Key data */ + } __packed wep_group; + struct { + u8 peer[6]; /* MAC address of the + * peer station */ + u16 reserved; + u8 keydata[16]; /* TKIP key data */ + u8 rx_mic_key[8]; /* Rx MIC key */ + u8 tx_mic_key[8]; /* Tx MIC key */ + } __packed tkip_pairwise; + struct { + u8 keydata[16]; /* TKIP key data */ + u8 rx_mic_key[8]; /* Rx MIC key */ + u8 keyid; /* Key ID */ + u8 reserved[3]; + u8 rx_seqnum[8]; /* Receive Sequence Counter */ + } __packed tkip_group; + struct { + u8 peer[6]; /* MAC address of the + * peer station */ + u16 reserved; + u8 keydata[16]; /* AES key data */ + } __packed aes_pairwise; + struct { + u8 keydata[16]; /* AES key data */ + u8 keyid; /* Key ID */ + u8 reserved[3]; + u8 rx_seqnum[8]; /* Receive Sequence Counter */ + } __packed aes_group; + struct { + u8 peer[6]; /* MAC address of the + * peer station */ + u8 keyid; /* Key ID */ + u8 reserved; + u8 keydata[16]; /* WAPI key data */ + u8 mic_key[16]; /* MIC key data */ + } __packed wapi_pairwise; + struct { + u8 keydata[16]; /* WAPI key data */ + u8 mic_key[16]; /* MIC key data */ + u8 keyid; /* Key ID */ + u8 reserved[3]; + } __packed wapi_group; + } __packed; +} __packed; + +int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg); + +/* 3.32 */ +#define WSM_REMOVE_KEY_REQ_ID 0x000D +#define WSM_REMOVE_KEY_RESP_ID 0x040D +struct wsm_remove_key { + u8 index; /* Key entry index : 0-10 */ +}; + +int wsm_remove_key(struct cw1200_common *priv, + const struct wsm_remove_key *arg); + +/* 3.34 */ +struct wsm_set_tx_queue_params { + /* WSM_ACK_POLICY_... */ + u8 ackPolicy; + + /* Medium Time of TSPEC (in 32us units) allowed per */ + /* One Second Averaging Period for this queue. */ + u16 allowedMediumTime; + + /* dot11MaxTransmitMsduLifetime to be used for the */ + /* specified queue. */ + u32 maxTransmitLifetime; +}; + +struct wsm_tx_queue_params { + /* NOTE: index is a linux queue id. 
*/ + struct wsm_set_tx_queue_params params[4]; +}; + + +#define WSM_TX_QUEUE_SET(queue_params, queue, ack_policy, allowed_time,\ + max_life_time) \ +do { \ + struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \ + p->ackPolicy = (ack_policy); \ + p->allowedMediumTime = (allowed_time); \ + p->maxTransmitLifetime = (max_life_time); \ +} while (0) + +int wsm_set_tx_queue_params(struct cw1200_common *priv, + const struct wsm_set_tx_queue_params *arg, u8 id); + +/* 3.36 */ +#define WSM_EDCA_PARAMS_REQ_ID 0x0013 +#define WSM_EDCA_PARAMS_RESP_ID 0x0413 +struct wsm_edca_queue_params { + /* CWmin (in slots) for the access class. */ + __le16 cwmin; + + /* CWmax (in slots) for the access class. */ + __le16 cwmax; + + /* AIFS (in slots) for the access class. */ + __le16 aifns; + + /* TX OP Limit (in microseconds) for the access class. */ + __le16 txop_limit; + + /* dot11MaxReceiveLifetime to be used for the specified */ + /* the access class. Overrides the global */ + /* dot11MaxReceiveLifetime value */ + __le32 max_rx_lifetime; +} __packed; + +struct wsm_edca_params { + /* NOTE: index is a linux queue id. */ + struct wsm_edca_queue_params params[4]; + bool uapsd_enable[4]; +}; + +#define TXOP_UNIT 32 +#define WSM_EDCA_SET(__edca, __queue, __aifs, __cw_min, __cw_max, __txop, __lifetime,\ + __uapsd) \ + do { \ + struct wsm_edca_queue_params *p = &(__edca)->params[__queue]; \ + p->cwmin = (__cw_min); \ + p->cwmax = (__cw_max); \ + p->aifns = (__aifs); \ + p->txop_limit = ((__txop) * TXOP_UNIT); \ + p->max_rx_lifetime = (__lifetime); \ + (__edca)->uapsd_enable[__queue] = (__uapsd); \ + } while (0) + +int wsm_set_edca_params(struct cw1200_common *priv, + const struct wsm_edca_params *arg); + +int wsm_set_uapsd_param(struct cw1200_common *priv, + const struct wsm_edca_params *arg); + +/* 3.38 */ +/* Set-System info. Skipped for now. Irrelevent. */ + +/* 3.40 */ +#define WSM_SWITCH_CHANNEL_REQ_ID 0x0016 +#define WSM_SWITCH_CHANNEL_RESP_ID 0x0416 + +struct wsm_switch_channel { + /* 1 - means the STA shall not transmit any further */ + /* frames until the channel switch has completed */ + u8 mode; + + /* Number of TBTTs until channel switch occurs. */ + /* 0 - indicates switch shall occur at any time */ + /* 1 - occurs immediately before the next TBTT */ + u8 switch_count; + + /* The new channel number to switch to. */ + /* Note this is defined as per section 2.7. */ + u16 channel_number; +}; + +int wsm_switch_channel(struct cw1200_common *priv, + const struct wsm_switch_channel *arg); + +typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv); + +#define WSM_START_REQ_ID 0x0017 +#define WSM_START_RESP_ID 0x0417 + +struct wsm_start { + /* WSM_START_MODE_... */ + /* [in] */ u8 mode; + + /* WSM_PHY_BAND_... */ + /* [in] */ u8 band; + + /* Channel number */ + /* [in] */ u16 channel_number; + + /* Client Traffic window in units of TU */ + /* Valid only when mode == ..._P2P */ + /* [in] */ u32 ct_window; + + /* Interval between two consecutive */ + /* beacon transmissions in TU. */ + /* [in] */ u32 beacon_interval; + + /* DTIM period in terms of beacon intervals */ + /* [in] */ u8 dtim_period; + + /* WSM_JOIN_PREAMBLE_... */ + /* [in] */ u8 preamble; + + /* The delay time (in microseconds) period */ + /* before sending a probe-request. */ + /* [in] */ u8 probe_delay; + + /* Length of the SSID */ + /* [in] */ u8 ssid_len; + + /* SSID of the BSS or P2P_GO to be started now. */ + /* [in] */ u8 ssid[32]; + + /* The basic supported rates for the MiniAP. 
*/ + /* [in] */ u32 basic_rate_set; +}; + +int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg); + +#define WSM_BEACON_TRANSMIT_REQ_ID 0x0018 +#define WSM_BEACON_TRANSMIT_RESP_ID 0x0418 + +struct wsm_beacon_transmit { + /* 1: enable; 0: disable */ + /* [in] */ u8 enable_beaconing; +}; + +int wsm_beacon_transmit(struct cw1200_common *priv, + const struct wsm_beacon_transmit *arg); + +int wsm_start_find(struct cw1200_common *priv); + +int wsm_stop_find(struct cw1200_common *priv); + +typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status); + +struct wsm_suspend_resume { + /* See 3.52 */ + /* Link ID */ + /* [out] */ int link_id; + /* Stop sending further Tx requests down to device for this link */ + /* [out] */ bool stop; + /* Transmit multicast Frames */ + /* [out] */ bool multicast; + /* The AC on which Tx to be suspended /resumed. */ + /* This is applicable only for U-APSD */ + /* WSM_QUEUE_... */ + /* [out] */ int queue; +}; + +typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv, + struct wsm_suspend_resume *arg); + +/* 3.54 Update-IE request. */ +struct wsm_update_ie { + /* WSM_UPDATE_IE_... */ + /* [in] */ u16 what; + /* [in] */ u16 count; + /* [in] */ u8 *ies; + /* [in] */ size_t length; +}; + +int wsm_update_ie(struct cw1200_common *priv, + const struct wsm_update_ie *arg); + +/* 3.56 */ +struct wsm_map_link { + /* MAC address of the remote device */ + /* [in] */ u8 mac_addr[6]; + /* [in] */ u8 link_id; +}; + +int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg); + +/* ******************************************************************** */ +/* MIB shortcuts */ + +static inline int wsm_set_output_power(struct cw1200_common *priv, + int power_level) +{ + __le32 val = __cpu_to_le32(power_level); + return wsm_write_mib(priv, WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL, + &val, sizeof(val)); +} + +static inline int wsm_set_beacon_wakeup_period(struct cw1200_common *priv, + unsigned dtim_interval, + unsigned listen_interval) +{ + struct { + u8 numBeaconPeriods; + u8 reserved; + __le16 listenInterval; + } val = { + dtim_interval, 0, __cpu_to_le16(listen_interval) + }; + + if (dtim_interval > 0xFF || listen_interval > 0xFFFF) + return -EINVAL; + else + return wsm_write_mib(priv, WSM_MIB_ID_BEACON_WAKEUP_PERIOD, + &val, sizeof(val)); +} + +struct wsm_rcpi_rssi_threshold { + u8 rssiRcpiMode; /* WSM_RCPI_RSSI_...
*/ + u8 lowerThreshold; + u8 upperThreshold; + u8 rollingAverageCount; +}; + +static inline int wsm_set_rcpi_rssi_threshold(struct cw1200_common *priv, + struct wsm_rcpi_rssi_threshold *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_RCPI_RSSI_THRESHOLD, arg, + sizeof(*arg)); +} + +struct wsm_mib_counters_table { + __le32 plcp_errors; + __le32 fcs_errors; + __le32 tx_packets; + __le32 rx_packets; + __le32 rx_packet_errors; + __le32 rx_decryption_failures; + __le32 rx_mic_failures; + __le32 rx_no_key_failures; + __le32 tx_multicast_frames; + __le32 tx_frames_success; + __le32 tx_frame_failures; + __le32 tx_frames_retried; + __le32 tx_frames_multi_retried; + __le32 rx_frame_duplicates; + __le32 rts_success; + __le32 rts_failures; + __le32 ack_failures; + __le32 rx_multicast_frames; + __le32 rx_frames_success; + __le32 rx_cmac_icv_errors; + __le32 rx_cmac_replays; + __le32 rx_mgmt_ccmp_replays; +} __packed; + +static inline int wsm_get_counters_table(struct cw1200_common *priv, + struct wsm_mib_counters_table *arg) +{ + return wsm_read_mib(priv, WSM_MIB_ID_COUNTERS_TABLE, + arg, sizeof(*arg)); +} + +static inline int wsm_get_station_id(struct cw1200_common *priv, u8 *mac) +{ + return wsm_read_mib(priv, WSM_MIB_ID_DOT11_STATION_ID, mac, ETH_ALEN); +} + +struct wsm_rx_filter { + bool promiscuous; + bool bssid; + bool fcs; + bool probeResponder; +}; + +static inline int wsm_set_rx_filter(struct cw1200_common *priv, + const struct wsm_rx_filter *arg) +{ + __le32 val = 0; + if (arg->promiscuous) + val |= __cpu_to_le32(BIT(0)); + if (arg->bssid) + val |= __cpu_to_le32(BIT(1)); + if (arg->fcs) + val |= __cpu_to_le32(BIT(2)); + if (arg->probeResponder) + val |= __cpu_to_le32(BIT(3)); + return wsm_write_mib(priv, WSM_MIB_ID_RX_FILTER, &val, sizeof(val)); +} + +int wsm_set_probe_responder(struct cw1200_common *priv, bool enable); + +#define WSM_BEACON_FILTER_IE_HAS_CHANGED BIT(0) +#define WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT BIT(1) +#define WSM_BEACON_FILTER_IE_HAS_APPEARED BIT(2) + +struct wsm_beacon_filter_table_entry { + u8 ie_id; + u8 flags; + u8 oui[3]; + u8 match_data[3]; +} __packed; + +struct wsm_mib_beacon_filter_table { + __le32 num; + struct wsm_beacon_filter_table_entry entry[10]; +} __packed; + +static inline int wsm_set_beacon_filter_table(struct cw1200_common *priv, + struct wsm_mib_beacon_filter_table *ft) +{ + size_t size = __le32_to_cpu(ft->num) * + sizeof(struct wsm_beacon_filter_table_entry) + + sizeof(__le32); + + return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_TABLE, ft, size); +} + +#define WSM_BEACON_FILTER_ENABLE BIT(0) /* Enable/disable beacon filtering */ +#define WSM_BEACON_FILTER_AUTO_ERP BIT(1) /* If 1 FW will handle ERP IE changes internally */ + +struct wsm_beacon_filter_control { + int enabled; + int bcn_count; +}; + +static inline int wsm_beacon_filter_control(struct cw1200_common *priv, + struct wsm_beacon_filter_control *arg) +{ + struct { + __le32 enabled; + __le32 bcn_count; + } val; + val.enabled = __cpu_to_le32(arg->enabled); + val.bcn_count = __cpu_to_le32(arg->bcn_count); + return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_ENABLE, &val, + sizeof(val)); +} + +enum wsm_power_mode { + wsm_power_mode_active = 0, + wsm_power_mode_doze = 1, + wsm_power_mode_quiescent = 2, +}; + +struct wsm_operational_mode { + enum wsm_power_mode power_mode; + int disable_more_flag_usage; + int perform_ant_diversity; +}; + +static inline int wsm_set_operational_mode(struct cw1200_common *priv, + const struct wsm_operational_mode *arg) +{ + u8 val = arg->power_mode; + if 
(arg->disable_more_flag_usage) + val |= BIT(4); + if (arg->perform_ant_diversity) + val |= BIT(5); + return wsm_write_mib(priv, WSM_MIB_ID_OPERATIONAL_POWER_MODE, &val, + sizeof(val)); +} + +struct wsm_template_frame { + u8 frame_type; + u8 rate; + struct sk_buff *skb; +}; + +static inline int wsm_set_template_frame(struct cw1200_common *priv, + struct wsm_template_frame *arg) +{ + int ret; + u8 *p = skb_push(arg->skb, 4); + p[0] = arg->frame_type; + p[1] = arg->rate; + ((u16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4); + ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len); + skb_pull(arg->skb, 4); + return ret; +} + + +struct wsm_protected_mgmt_policy { + bool protectedMgmtEnable; + bool unprotectedMgmtFramesAllowed; + bool encryptionForAuthFrame; +}; + +static inline int wsm_set_protected_mgmt_policy(struct cw1200_common *priv, + struct wsm_protected_mgmt_policy *arg) +{ + __le32 val = 0; + int ret; + if (arg->protectedMgmtEnable) + val |= __cpu_to_le32(BIT(0)); + if (arg->unprotectedMgmtFramesAllowed) + val |= __cpu_to_le32(BIT(1)); + if (arg->encryptionForAuthFrame) + val |= __cpu_to_le32(BIT(2)); + ret = wsm_write_mib(priv, WSM_MIB_ID_PROTECTED_MGMT_POLICY, + &val, sizeof(val)); + return ret; +} + +struct wsm_mib_block_ack_policy { + u8 tx_tid; + u8 reserved1; + u8 rx_tid; + u8 reserved2; +} __packed; + +static inline int wsm_set_block_ack_policy(struct cw1200_common *priv, + u8 tx_tid_policy, + u8 rx_tid_policy) +{ + struct wsm_mib_block_ack_policy val = { + .tx_tid = tx_tid_policy, + .rx_tid = rx_tid_policy, + }; + return wsm_write_mib(priv, WSM_MIB_ID_BLOCK_ACK_POLICY, &val, + sizeof(val)); +} + +struct wsm_mib_association_mode { + u8 flags; /* WSM_ASSOCIATION_MODE_... */ + u8 preamble; /* WSM_JOIN_PREAMBLE_... */ + u8 greenfield; /* 1 for greenfield */ + u8 mpdu_start_spacing; + __le32 basic_rate_set; +} __packed; + +static inline int wsm_set_association_mode(struct cw1200_common *priv, + struct wsm_mib_association_mode *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_SET_ASSOCIATION_MODE, arg, + sizeof(*arg)); +} + +#define WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED BIT(2) +#define WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT BIT(3) +struct wsm_tx_rate_retry_policy { + u8 index; + u8 short_retries; + u8 long_retries; + /* BIT(2) - Terminate retries when Tx rate retry policy + * finishes. 
+ * BIT(3) - Count initial frame transmission as part of + * rate retry counting but not as a retry + * attempt */ + u8 flags; + u8 rate_recoveries; + u8 reserved[3]; + __le32 rate_count_indices[3]; +} __packed; + +struct wsm_set_tx_rate_retry_policy { + u8 num; + u8 reserved[3]; + struct wsm_tx_rate_retry_policy tbl[8]; +} __packed; + +static inline int wsm_set_tx_rate_retry_policy(struct cw1200_common *priv, + struct wsm_set_tx_rate_retry_policy *arg) +{ + size_t size = 4 + arg->num * sizeof(struct wsm_tx_rate_retry_policy); + return wsm_write_mib(priv, WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, + size); +} + +/* 4.32 SetEtherTypeDataFrameFilter */ +struct wsm_ether_type_filter_hdr { + u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */ + u8 reserved[3]; +} __packed; + +struct wsm_ether_type_filter { + u8 action; /* WSM_FILTER_ACTION_XXX */ + u8 reserved; + __le16 type; /* Type of ethernet frame */ +} __packed; + +static inline int wsm_set_ether_type_filter(struct cw1200_common *priv, + struct wsm_ether_type_filter_hdr *arg) +{ + size_t size = sizeof(struct wsm_ether_type_filter_hdr) + + arg->num * sizeof(struct wsm_ether_type_filter); + return wsm_write_mib(priv, WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER, + arg, size); +} + +/* 4.33 SetUDPPortDataFrameFilter */ +struct wsm_udp_port_filter_hdr { + u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */ + u8 reserved[3]; +} __packed; + +struct wsm_udp_port_filter { + u8 action; /* WSM_FILTER_ACTION_XXX */ + u8 type; /* WSM_FILTER_PORT_TYPE_XXX */ + __le16 port; /* Port number */ +} __packed; + +static inline int wsm_set_udp_port_filter(struct cw1200_common *priv, + struct wsm_udp_port_filter_hdr *arg) +{ + size_t size = sizeof(struct wsm_udp_port_filter_hdr) + + arg->num * sizeof(struct wsm_udp_port_filter); + return wsm_write_mib(priv, WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER, + arg, size); +} + +/* Undocumented MIBs: */ +/* 4.35 P2PDeviceInfo */ +#define D11_MAX_SSID_LEN (32) + +struct wsm_p2p_device_type { + __le16 categoryId; + u8 oui[4]; + __le16 subCategoryId; +} __packed; + +struct wsm_p2p_device_info { + struct wsm_p2p_device_type primaryDevice; + u8 reserved1[3]; + u8 devNameSize; + u8 localDevName[D11_MAX_SSID_LEN]; + u8 reserved2[3]; + u8 numSecDevSupported; + struct wsm_p2p_device_type secondaryDevices[0]; +} __packed; + +/* 4.36 SetWCDMABand - WO */ +struct wsm_cdma_band { + u8 WCDMA_Band; + u8 reserved[3]; +} __packed; + +/* 4.37 GroupTxSequenceCounter - RO */ +struct wsm_group_tx_seq { + __le32 bits_47_16; + __le16 bits_15_00; + __le16 reserved; +} __packed; + +/* 4.39 SetHtProtection - WO */ +#define WSM_DUAL_CTS_PROT_ENB (1 << 0) +#define WSM_NON_GREENFIELD_STA_PRESENT (1 << 1) +#define WSM_HT_PROT_MODE__NO_PROT (0 << 2) +#define WSM_HT_PROT_MODE__NON_MEMBER (1 << 2) +#define WSM_HT_PROT_MODE__20_MHZ (2 << 2) +#define WSM_HT_PROT_MODE__NON_HT_MIXED (3 << 2) +#define WSM_LSIG_TXOP_PROT_FULL (1 << 4) +#define WSM_LARGE_L_LENGTH_PROT (1 << 5) + +struct wsm_ht_protection { + __le32 flags; +} __packed; + +/* 4.40 GPIO Command - R/W */ +#define WSM_GPIO_COMMAND_SETUP 0 +#define WSM_GPIO_COMMAND_READ 1 +#define WSM_GPIO_COMMAND_WRITE 2 +#define WSM_GPIO_COMMAND_RESET 3 +#define WSM_GPIO_ALL_PINS 0xFF + +struct wsm_gpio_command { + u8 GPIO_Command; + u8 pin; + __le16 config; +} __packed; + +/* 4.41 TSFCounter - RO */ +struct wsm_tsf_counter { + __le64 TSF_Counter; +} __packed; + +/* 4.43 Keep alive period */ +struct wsm_keep_alive_period { + __le16 keepAlivePeriod; + u8 reserved[2]; +} __packed; + +static inline int wsm_keep_alive_period(struct 
cw1200_common *priv, + int period) +{ + struct wsm_keep_alive_period arg = { + .keepAlivePeriod = __cpu_to_le16(period), + }; + return wsm_write_mib(priv, WSM_MIB_ID_KEEP_ALIVE_PERIOD, + &arg, sizeof(arg)); +}; + +/* BSSID filtering */ +struct wsm_set_bssid_filtering { + u8 filter; + u8 reserved[3]; +} __packed; + +static inline int wsm_set_bssid_filtering(struct cw1200_common *priv, + bool enabled) +{ + struct wsm_set_bssid_filtering arg = { + .filter = !enabled, + }; + return wsm_write_mib(priv, WSM_MIB_ID_DISABLE_BSSID_FILTER, + &arg, sizeof(arg)); +} + +/* Multicast filtering - 4.5 */ +struct wsm_mib_multicast_filter { + __le32 enable; + __le32 num_addrs; + u8 macaddrs[WSM_MAX_GRP_ADDRTABLE_ENTRIES][ETH_ALEN]; +} __packed; + +static inline int wsm_set_multicast_filter(struct cw1200_common *priv, + struct wsm_mib_multicast_filter *fp) +{ + return wsm_write_mib(priv, WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE, + fp, sizeof(*fp)); +} + +/* ARP IPv4 filtering - 4.10 */ +struct wsm_mib_arp_ipv4_filter { + __le32 enable; + __be32 ipv4addrs[WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES]; +} __packed; + +static inline int wsm_set_arp_ipv4_filter(struct cw1200_common *priv, + struct wsm_mib_arp_ipv4_filter *fp) +{ + return wsm_write_mib(priv, WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE, + fp, sizeof(*fp)); +} + +/* P2P Power Save Mode Info - 4.31 */ +struct wsm_p2p_ps_modeinfo { + u8 oppPsCTWindow; + u8 count; + u8 reserved; + u8 dtimCount; + __le32 duration; + __le32 interval; + __le32 startTime; +} __packed; + +static inline int wsm_set_p2p_ps_modeinfo(struct cw1200_common *priv, + struct wsm_p2p_ps_modeinfo *mi) +{ + return wsm_write_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO, + mi, sizeof(*mi)); +} + +static inline int wsm_get_p2p_ps_modeinfo(struct cw1200_common *priv, + struct wsm_p2p_ps_modeinfo *mi) +{ + return wsm_read_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO, + mi, sizeof(*mi)); +} + +/* UseMultiTxConfMessage */ + +static inline int wsm_use_multi_tx_conf(struct cw1200_common *priv, + bool enabled) +{ + __le32 arg = enabled ? 
__cpu_to_le32(1) : 0; + + return wsm_write_mib(priv, WSM_MIB_USE_MULTI_TX_CONF, + &arg, sizeof(arg)); +} + + +/* 4.26 SetUpasdInformation */ +struct wsm_uapsd_info { + __le16 uapsd_flags; + __le16 min_auto_trigger_interval; + __le16 max_auto_trigger_interval; + __le16 auto_trigger_step; +}; + +static inline int wsm_set_uapsd_info(struct cw1200_common *priv, + struct wsm_uapsd_info *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_SET_UAPSD_INFORMATION, + arg, sizeof(*arg)); +} + +/* 4.22 OverrideInternalTxRate */ +struct wsm_override_internal_txrate { + u8 internalTxRate; + u8 nonErpInternalTxRate; + u8 reserved[2]; +} __packed; + +static inline int wsm_set_override_internal_txrate(struct cw1200_common *priv, + struct wsm_override_internal_txrate *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE, + arg, sizeof(*arg)); +} + +/* ******************************************************************** */ +/* WSM TX port control */ + +void wsm_lock_tx(struct cw1200_common *priv); +void wsm_lock_tx_async(struct cw1200_common *priv); +bool wsm_flush_tx(struct cw1200_common *priv); +void wsm_unlock_tx(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* WSM / BH API */ + +int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len); +int wsm_handle_rx(struct cw1200_common *priv, u16 id, struct wsm_hdr *wsm, + struct sk_buff **skb_p); + +/* ******************************************************************** */ +/* wsm_buf API */ + +struct wsm_buf { + u8 *begin; + u8 *data; + u8 *end; +}; + +void wsm_buf_init(struct wsm_buf *buf); +void wsm_buf_deinit(struct wsm_buf *buf); + +/* ******************************************************************** */ +/* wsm_cmd API */ + +struct wsm_cmd { + spinlock_t lock; /* Protect structure from multiple access */ + int done; + u8 *ptr; + size_t len; + void *arg; + int ret; + u16 cmd; +}; + +/* ******************************************************************** */ +/* WSM TX buffer access */ + +int wsm_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst); +void wsm_txed(struct cw1200_common *priv, u8 *data); + +/* ******************************************************************** */ +/* Queue mapping: WSM <---> linux */ +/* Linux: VO VI BE BK */ +/* WSM: BE BK VI VO */ + +static inline u8 wsm_queue_id_to_linux(u8 queue_id) +{ + static const u8 queue_mapping[] = { + 2, 3, 1, 0 + }; + return queue_mapping[queue_id]; +} + +static inline u8 wsm_queue_id_to_wsm(u8 queue_id) +{ + static const u8 queue_mapping[] = { + 3, 2, 0, 1 + }; + return queue_mapping[queue_id]; +} + + +#ifdef CONFIG_CW1200_ETF +int wsm_raw_cmd(struct cw1200_common *priv, u8 *data, size_t len); +#endif + +#endif /* CW1200_HWIO_H_INCLUDED */ diff --git a/include/linux/cw1200_platform.h b/include/linux/cw1200_platform.h new file mode 100644 index 000000000000..c168fa512347 --- /dev/null +++ b/include/linux/cw1200_platform.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Dmitry Tarnyagin + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CW1200_PLAT_H_INCLUDED +#define CW1200_PLAT_H_INCLUDED + +struct cw1200_platform_data_spi { + u8 spi_bits_per_word; /* REQUIRED */ + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + bool have_5ghz; + const struct resource *reset; /* GPIO to RSTn signal */ + const struct resource *powerup; /* GPIO to POWERUP signal */ + int (*power_ctrl)(const struct 
cw1200_platform_data_spi *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + +struct cw1200_platform_data_sdio { + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + const struct resource *irq; /* if using GPIO for IRQ */ + bool have_5ghz; + bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ + const struct resource *reset; /* GPIO to RSTn signal */ + const struct resource *powerup; /* GPIO to POWERUP signal */ + int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + +const void *cw1200_get_platform_data(void); + +#endif /* CW1200_PLAT_H_INCLUDED */ -- cgit v1.2.3 From 5f292354e7ff69afbe9a272ef997b8f3edaf860a Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Wed, 29 May 2013 09:32:45 +0000 Subject: net: mv643xx_eth: add phy_node to platform_data struct This adds a struct device_node pointer for a phy passed by phandle to mv643xx_eth node. Signed-off-by: Sebastian Hesselbarth Signed-off-by: David S. Miller --- include/linux/mv643xx_eth.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index 141d395bbb5f..6e8215b15998 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -30,6 +30,7 @@ struct mv643xx_eth_shared_platform_data { #define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x)) #define MV643XX_ETH_PHY_NONE 0xff +struct device_node; struct mv643xx_eth_platform_data { /* * Pointer back to our parent instance, and our port number. @@ -41,6 +42,7 @@ struct mv643xx_eth_platform_data { * Whether a PHY is present, and if yes, at which address. */ int phy_addr; + struct device_node *phy_node; /* * Use this MAC address if it is valid, overriding the -- cgit v1.2.3 From 3da09a5154edb92fe74c266a4fa900bcbed6d44c Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 30 May 2013 20:08:26 +0000 Subject: phy: Add Marvell 88E1116R phy ID This phy is on Xilinx ZC702 zynq development board. Signed-off-by: Anirudha Sarangi Signed-off-by: Michal Simek Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 65 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 66 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 371353c81f3c..df5a9f6d2864 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -116,6 +116,8 @@ #define MII_M1011_PHY_STATUS_RESOLVED 0x0800 #define MII_M1011_PHY_STATUS_LINK 0x0400 +#define MII_M1116R_CONTROL_REG_MAC 21 + MODULE_DESCRIPTION("Marvell PHY driver"); MODULE_AUTHOR("Andy Fleming"); @@ -372,6 +374,55 @@ static int m88e1318_config_aneg(struct phy_device *phydev) return m88e1121_config_aneg(phydev); } +static int m88e1116r_config_init(struct phy_device *phydev) +{ + int temp; + int err; + + temp = phy_read(phydev, MII_BMCR); + temp |= BMCR_RESET; + err = phy_write(phydev, MII_BMCR, temp); + if (err < 0) + return err; + + mdelay(500); + + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0); + if (err < 0) + return err; + + temp = phy_read(phydev, MII_M1011_PHY_SCR); + temp |= (7 << 12); /* max number of gigabit attempts */ + temp |= (1 << 11); /* enable downshift */ + temp |= MII_M1011_PHY_SCR_AUTO_CROSS; + err = phy_write(phydev, MII_M1011_PHY_SCR, temp); + if (err < 0) + return err; + + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 2); + if (err < 0) + return err; + temp = phy_read(phydev, MII_M1116R_CONTROL_REG_MAC); + temp |= (1 << 5); + temp |= (1 << 4); + err = phy_write(phydev, MII_M1116R_CONTROL_REG_MAC, temp); + if (err < 0) + return err; + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0); + if (err < 0) + return err; + + temp = phy_read(phydev, MII_BMCR); + temp |= BMCR_RESET; + err = phy_write(phydev, MII_BMCR, temp); + if (err < 0) + return err; + + mdelay(500); + + return 0; +} + static int m88e1111_config_init(struct phy_device *phydev) { int err; @@ -940,6 +991,19 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .driver = { .owner = THIS_MODULE }, }, + { + .phy_id = MARVELL_PHY_ID_88E1116R, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1116R", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &m88e1116r_config_init, + .config_aneg = &genphy_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .driver = { .owner = THIS_MODULE }, + }, }; static int __init marvell_init(void) @@ -967,6 +1031,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1149R, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1240, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, { } }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index dd3c34ebca9a..ec41025cb86e 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -14,6 +14,7 @@ #define MARVELL_PHY_ID_88E1149R 0x01410e50 #define MARVELL_PHY_ID_88E1240 0x01410e30 #define MARVELL_PHY_ID_88E1318S 0x01410e90 +#define MARVELL_PHY_ID_88E1116R 0x01410e40 /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 -- cgit v1.2.3 From 10e24caa98b6dd0d3d8b783453d5c07aa4e9af0c Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 30 May 2013 20:08:27 +0000 Subject: phy: Add Marvell 88E1510 phy ID Add support for this new phy ID. 
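Both of these new entries rely on phylib's ID matching: the 32-bit identifier the PHY reports in its ID registers is compared against each driver entry's phy_id under phy_id_mask, so a single entry covers every silicon revision of a part. A minimal sketch of that comparison follows; the 0xfffffff0 value is shown only to illustrate masking off the revision nibble, the authoritative constant being MARVELL_PHY_ID_MASK in marvell_phy.h.

    /* Illustrative only: how a probed PHY ID is matched against a driver
     * or mdio_device_id entry.  The mask here mimics dropping the low
     * revision nibble; real code uses MARVELL_PHY_ID_MASK from
     * <linux/marvell_phy.h>.
     */
    #include <linux/types.h>

    static bool phy_id_matches(u32 probed_id, u32 entry_id, u32 mask)
    {
            return (probed_id & mask) == (entry_id & mask);
    }

    /* e.g. a rev-3 88E1116R reporting 0x01410e43 still matches the
     * 0x01410e40 table entry under a 0xfffffff0 mask, so the single
     * "Marvell 88E1116R" driver entry is selected.
     */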
Signed-off-by: Rick Hoover Signed-off-by: Steven Wang Signed-off-by: Lars-Peter Clausen Signed-off-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 25 +++++++++++++++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 26 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index df5a9f6d2864..2e91477362d4 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -374,6 +374,17 @@ static int m88e1318_config_aneg(struct phy_device *phydev) return m88e1121_config_aneg(phydev); } +static int m88e1510_config_aneg(struct phy_device *phydev) +{ + int err; + + err = m88e1318_config_aneg(phydev); + if (err < 0) + return err; + + return marvell_of_reg_init(phydev); +} + static int m88e1116r_config_init(struct phy_device *phydev) { int temp; @@ -1004,6 +1015,19 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .driver = { .owner = THIS_MODULE }, }, + { + .phy_id = MARVELL_PHY_ID_88E1510, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1510", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &m88e1510_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .driver = { .owner = THIS_MODULE }, + }, }; static int __init marvell_init(void) @@ -1032,6 +1056,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1240, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, { } }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index ec41025cb86e..8e9a029e093d 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -15,6 +15,7 @@ #define MARVELL_PHY_ID_88E1240 0x01410e30 #define MARVELL_PHY_ID_88E1318S 0x01410e90 #define MARVELL_PHY_ID_88E1116R 0x01410e40 +#define MARVELL_PHY_ID_88E1510 0x01410dd0 /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 -- cgit v1.2.3 From 35d0461061f27eeb62de63174959edbbb9e434de Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 29 May 2013 15:16:05 +0800 Subject: net: clean up skb headers code commit 1a37e412a0225fcba5587 (net: Use 16bits for *_headers fields of struct skbuff) converts skb->*_header to u16, some #if NET_SKBUFF_DATA_USES_OFFSET are now useless, and to be safe, we could just use "X = (typeof(X)) ~0U;" as suggested by David. Cc: David S. Miller Cc: Simon Horman Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 4 ++-- net/core/skbuff.c | 16 +++++----------- 2 files changed, 7 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5f931191cf57..b9997907a0f1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1593,7 +1593,7 @@ static inline void skb_set_inner_mac_header(struct sk_buff *skb, } static inline bool skb_transport_header_was_set(const struct sk_buff *skb) { - return skb->transport_header != ~0U; + return skb->transport_header != (typeof(skb->transport_header))~0U; } static inline unsigned char *skb_transport_header(const struct sk_buff *skb) @@ -1636,7 +1636,7 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb) static inline int skb_mac_header_was_set(const struct sk_buff *skb) { - return skb->mac_header != ~0U; + return skb->mac_header != (typeof(skb->mac_header))~0U; } static inline void skb_reset_mac_header(struct sk_buff *skb) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f45de077ab9e..6b1b52c5593b 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -199,9 +199,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) skb->truesize = sizeof(struct sk_buff); atomic_set(&skb->users, 1); -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->mac_header = (__u16) ~0U; -#endif + skb->mac_header = (typeof(skb->mac_header))~0U; out: return skb; } @@ -275,10 +273,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, skb->data = data; skb_reset_tail_pointer(skb); skb->end = skb->tail + size; -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->mac_header = (__u16) ~0U; - skb->transport_header = (__u16) ~0U; -#endif + skb->mac_header = (typeof(skb->mac_header))~0U; + skb->transport_header = (typeof(skb->transport_header))~0U; /* make sure we initialize shinfo sequentially */ shinfo = skb_shinfo(skb); @@ -344,10 +340,8 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) skb->data = data; skb_reset_tail_pointer(skb); skb->end = skb->tail + size; -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->mac_header = (__u16) ~0U; - skb->transport_header = (__u16) ~0U; -#endif + skb->mac_header = (typeof(skb->mac_header))~0U; + skb->transport_header = (typeof(skb->transport_header))~0U; /* make sure we initialize shinfo sequentially */ shinfo = skb_shinfo(skb); -- cgit v1.2.3 From bf3d6a8f791b2a81279b9ce3201b4970f6fbe51a Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 27 May 2013 23:48:15 +0000 Subject: iptunnel: specify protocol outside IP header Before this patch, ip_tunnel_xmit() was using the field protocol from the IP header passed into argument. There is no functional change, this patch prepares the support of IPv4 over IPv4 for module sit. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/net/ip_tunnels.h | 2 +- net/ipv4/ip_gre.c | 2 +- net/ipv4/ip_tunnel.c | 4 ++-- net/ipv4/ipip.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 4b6f0b28f41f..40b4dfce01fc 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -101,7 +101,7 @@ int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn); void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, - const struct iphdr *tnl_params); + const struct iphdr *tnl_params, const u8 protocol); int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 2a83591492dd..a982657d05e7 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -429,7 +429,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, return; } - ip_tunnel_xmit(skb, dev, tnl_params); + ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); } static netdev_tx_t ipgre_xmit(struct sk_buff *skb, diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index e4147ec1665a..b89095c1518f 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -487,7 +487,7 @@ drop: EXPORT_SYMBOL_GPL(ip_tunnel_rcv); void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, - const struct iphdr *tnl_params) + const struct iphdr *tnl_params, const u8 protocol) { struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *inner_iph; @@ -670,7 +670,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, iph->version = 4; iph->ihl = sizeof(struct iphdr) >> 2; iph->frag_off = df; - iph->protocol = tnl_params->protocol; + iph->protocol = protocol; iph->tos = ip_tunnel_ecn_encap(tos, inner_iph, skb); iph->daddr = fl4.daddr; iph->saddr = fl4.saddr; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 77bfcce64fe5..9df7ecd393f2 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -222,7 +222,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) skb->encapsulation = 1; } - ip_tunnel_xmit(skb, dev, tiph); + ip_tunnel_xmit(skb, dev, tiph, tiph->protocol); return NETDEV_TX_OK; tx_error: -- cgit v1.2.3 From 2cc70ba4cf5f97a7cf08063d2fae693d36b462eb Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 28 May 2013 04:07:21 +0000 Subject: phy: add reverse MII PHY connection type The PHY library currently does not know about the the reverse MII connection type. Add it to the list of supported PHY modes and update of_get_phy_mode() to support it and look for the string "rev-mii". Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/of/of_net.c | 1 + include/linux/phy.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c index ffab033d207e..ea174c8ee34b 100644 --- a/drivers/of/of_net.c +++ b/drivers/of/of_net.c @@ -22,6 +22,7 @@ static const char *phy_modes[] = { [PHY_INTERFACE_MODE_GMII] = "gmii", [PHY_INTERFACE_MODE_SGMII] = "sgmii", [PHY_INTERFACE_MODE_TBI] = "tbi", + [PHY_INTERFACE_MODE_REVMII] = "rev-mii", [PHY_INTERFACE_MODE_RMII] = "rmii", [PHY_INTERFACE_MODE_RGMII] = "rgmii", [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id", diff --git a/include/linux/phy.h b/include/linux/phy.h index ee411b0eef5d..64ab823f7b74 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -58,6 +58,7 @@ typedef enum { PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_SGMII, PHY_INTERFACE_MODE_TBI, + PHY_INTERFACE_MODE_REVMII, PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_RGMII_ID, -- cgit v1.2.3 From 5aad1de5ea2c260b4cd2f70b70e146d55dbbc528 Mon Sep 17 00:00:00 2001 From: Timo Teräs Date: Mon, 27 May 2013 20:46:33 +0000 Subject: ipv4: use separate genid for next hop exceptions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 13d82bf5 (ipv4: Fix flushing of cached routing informations) added the support to flush learned pmtu information. However, using rt_genid is quite heavy as it is bumped on route add/change and multicast events amongst other places. These can happen quite often, especially if using dynamic routing protocols. While this is ok with routes (as they are just recreated locally), the pmtu information is learned from remote systems and the icmp notification can come with long delays. It is worthy to have separate genid to avoid excessive pmtu resets. Cc: Steffen Klassert Signed-off-by: Timo Teräs Signed-off-by: David S. 
Miller --- include/net/ip_fib.h | 1 + include/net/net_namespace.h | 11 +++++++++++ net/ipv4/route.c | 12 ++++++++++-- 3 files changed, 22 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index e49db91593a9..44424e9dab2a 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -51,6 +51,7 @@ struct rtable; struct fib_nh_exception { struct fib_nh_exception __rcu *fnhe_next; + int fnhe_genid; __be32 fnhe_daddr; u32 fnhe_pmtu; __be32 fnhe_gw; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index b17697827482..495bc57f292c 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -118,6 +118,7 @@ struct net { struct netns_ipvs *ipvs; struct sock *diag_nlsk; atomic_t rt_genid; + atomic_t fnhe_genid; }; /* @@ -340,4 +341,14 @@ static inline void rt_genid_bump(struct net *net) atomic_inc(&net->rt_genid); } +static inline int fnhe_genid(struct net *net) +{ + return atomic_read(&net->fnhe_genid); +} + +static inline void fnhe_genid_bump(struct net *net) +{ + atomic_inc(&net->fnhe_genid); +} + #endif /* __NET_NET_NAMESPACE_H */ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a4082be1b9b4..403e28302869 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -658,6 +658,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, fnhe->fnhe_next = hash->chain; rcu_assign_pointer(hash->chain, fnhe); } + fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev)); fnhe->fnhe_daddr = daddr; fnhe->fnhe_gw = gw; fnhe->fnhe_pmtu = pmtu; @@ -1236,8 +1237,11 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, spin_lock_bh(&fnhe_lock); if (daddr == fnhe->fnhe_daddr) { + int genid = fnhe_genid(dev_net(rt->dst.dev)); struct rtable *orig = rcu_dereference(fnhe->fnhe_rth); - if (orig && rt_is_expired(orig)) { + + if (fnhe->fnhe_genid != genid) { + fnhe->fnhe_genid = genid; fnhe->fnhe_gw = 0; fnhe->fnhe_pmtu = 0; fnhe->fnhe_expires = 0; @@ -2443,8 +2447,11 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { + struct net *net = (struct net *)__ctl->extra1; + if (write) { - rt_cache_flush((struct net *)__ctl->extra1); + rt_cache_flush(net); + fnhe_genid_bump(net); return 0; } @@ -2619,6 +2626,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = { static __net_init int rt_genid_init(struct net *net) { atomic_set(&net->rt_genid, 0); + atomic_set(&net->fnhe_genid, 0); get_random_bytes(&net->ipv4.dev_addr_genid, sizeof(net->ipv4.dev_addr_genid)); return 0; -- cgit v1.2.3 From e05ecccdf752122a439b03c3190458d2c8f0bac6 Mon Sep 17 00:00:00 2001 From: Jacob Minshall Date: Wed, 29 May 2013 14:32:36 -0700 Subject: mac80211: set mesh formation field properly Cap max peerings at 63 in accordance with IEEE-2012 8.4.2.100.7. Triggers a beacon regeneration every time the number of peerings changes. Previously this would only happen if the "accepting peerings" bit changed. 
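The cap of 63 is dictated by the Mesh Formation Info field: the peering count occupies a 6-bit subfield one bit above the low-order flag bit, which is why mesh_add_meshconf_ie() in the hunk below shifts the clamped count left by one. A small sketch of that packing, assuming the bit-0 flag is the connected-to-mesh-gate indication:

    /* Sketch of packing the Mesh Formation Info octet (assumed layout:
     * bit 0 = connected-to-mesh-gate flag, bits 1-6 = number of peerings).
     * IEEE80211_MAX_MESH_PEERINGS (63) is the constant added below.
     */
    static u8 mesh_formation_info(int estab_plinks, bool connected_to_gate)
    {
            u8 neighbors = min_t(int, estab_plinks, IEEE80211_MAX_MESH_PEERINGS);

            return (neighbors << 1) | (connected_to_gate ? BIT(0) : 0);
    }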
Signed-off-by: Jacob Minshall Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 1 + net/mac80211/mesh.c | 3 +-- net/mac80211/mesh.h | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index d826e5a84af0..b0dc87a2a376 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -146,6 +146,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_MAX_RTS_THRESHOLD 2353 #define IEEE80211_MAX_AID 2007 #define IEEE80211_MAX_TIM_LEN 251 +#define IEEE80211_MAX_MESH_PEERINGS 63 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section 6.2.1.1.2. diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index b3d1fdd46368..73a597bad6e0 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -274,8 +274,7 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, *pos++ = ifmsh->mesh_auth_id; /* Mesh Formation Info - number of neighbors */ neighbors = atomic_read(&ifmsh->estab_plinks); - /* Number of neighbor mesh STAs or 15 whichever is smaller */ - neighbors = (neighbors > 15) ? 15 : neighbors; + neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS); *pos++ = neighbors << 1; /* Mesh capability */ *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING; diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index da158774eebb..8b4d9a3e9eee 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -324,14 +324,14 @@ static inline u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_inc(&sdata->u.mesh.estab_plinks); - return mesh_accept_plinks_update(sdata); + return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; } static inline u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_dec(&sdata->u.mesh.estab_plinks); - return mesh_accept_plinks_update(sdata); + return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; } static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) -- cgit v1.2.3 From 964dc9e2c3aaccacacd40640964a58544fb5769a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 3 Jun 2013 17:25:34 +0200 Subject: cfg80211: take WoWLAN support information out of wiphy struct There's no need to take up the space for devices that don't support WoWLAN, and most drivers can even make the support data static const (except where it's modified at runtime.) 
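In practice the conversion is the same two-step idiom in every driver touched below: the capability description becomes a static const object, and the wiphy is handed a pointer to it instead of carrying an embedded copy. A condensed sketch of that driver-side pattern, with placeholder names and capability values:

    #ifdef CONFIG_PM
    /* Placeholder capabilities; a real driver advertises only what its
     * firmware supports (compare the ath6kl/ath9k/mwifiex hunks below).
     */
    static const struct wiphy_wowlan_support example_wowlan_support = {
            .flags           = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
            .n_patterns      = 8,
            .pattern_min_len = 1,
            .pattern_max_len = 128,
    };
    #endif

    static void example_setup_wowlan(struct wiphy *wiphy)
    {
    #ifdef CONFIG_PM
            wiphy->wowlan = &example_wowlan_support; /* pointer, not an embedded copy */
    #endif
    }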
Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath6kl/cfg80211.c | 24 ++++++++----- drivers/net/wireless/ath/ath9k/init.c | 20 ++++++----- drivers/net/wireless/iwlwifi/dvm/dev.h | 3 ++ drivers/net/wireless/iwlwifi/dvm/mac80211.c | 17 ++++----- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 26 +++++++------- drivers/net/wireless/iwlwifi/mvm/mvm.h | 1 + drivers/net/wireless/mwifiex/cfg80211.c | 16 ++++++--- drivers/net/wireless/ti/wlcore/main.c | 19 +++++----- include/net/cfg80211.h | 2 +- net/mac80211/main.c | 3 +- net/wireless/core.c | 20 ++++++----- net/wireless/nl80211.c | 56 ++++++++++++++--------------- 12 files changed, 115 insertions(+), 92 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 5c9736a94e54..f7995b2b12a4 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -3679,6 +3679,20 @@ err: return NULL; } +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support ath6kl_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_4WAY_HANDSHAKE, + .n_patterns = WOW_MAX_FILTERS_PER_LIST, + .pattern_min_len = 1, + .pattern_max_len = WOW_PATTERN_SIZE, +}; +#endif + int ath6kl_cfg80211_init(struct ath6kl *ar) { struct wiphy *wiphy = ar->wiphy; @@ -3772,15 +3786,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); #ifdef CONFIG_PM - wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_4WAY_HANDSHAKE; - wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST; - wiphy->wowlan.pattern_min_len = 1; - wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE; + wiphy->wowlan = &ath6kl_wowlan_support; #endif wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 0237b2868961..f993362e9e13 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -755,6 +755,15 @@ static const struct ieee80211_iface_combination if_comb[] = { } }; +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support ath9k_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .n_patterns = MAX_NUM_USER_PATTERN, + .pattern_min_len = 1, + .pattern_max_len = MAX_PATTERN_SIZE, +}; +#endif + void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) { struct ath_hw *ah = sc->sc_ah; @@ -797,15 +806,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) #ifdef CONFIG_PM_SLEEP if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && - device_can_wakeup(sc->dev)) { - - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT; - hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN; - hw->wiphy->wowlan.pattern_min_len = 1; - hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE; - - } + device_can_wakeup(sc->dev)) + hw->wiphy->wowlan = &ath9k_wowlan_support; atomic_set(&sc->wow_sleep_proc_intr, -1); atomic_set(&sc->wow_got_bmiss_intr, -1); diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index 71ea77576d22..e71acfd344aa 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h 
@@ -870,6 +870,9 @@ struct iwl_priv { __le64 replay_ctr; __le16 last_seq_ctl; bool have_rekey_data; +#ifdef CONFIG_PM_SLEEP + struct wiphy_wowlan_support wowlan_support; +#endif /* device_pointers: pointers to ucode event tables */ struct { diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index cab23af0be9e..661b5e71aac8 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -208,20 +208,21 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, priv->trans->ops->d3_suspend && priv->trans->ops->d3_resume && device_can_wakeup(priv->trans->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; + priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; if (!iwlwifi_mod_params.sw_crypto) - hw->wiphy->wowlan.flags |= + priv->wowlan_support.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE; - hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; - hw->wiphy->wowlan.pattern_min_len = + priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; + priv->wowlan_support.pattern_min_len = IWLAGN_WOWLAN_MIN_PATTERN_LEN; - hw->wiphy->wowlan.pattern_max_len = + priv->wowlan_support.pattern_max_len = IWLAGN_WOWLAN_MAX_PATTERN_LEN; + hw->wiphy->wowlan = &priv->wowlan_support; } #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index dd158ec571fb..827fa641490f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -222,20 +222,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; + mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; if (!iwlwifi_mod_params.sw_crypto) - hw->wiphy->wowlan.flags |= - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_4WAY_HANDSHAKE; - - hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; - hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; - hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; - hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; + mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_4WAY_HANDSHAKE; + + mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; + mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; + mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; + mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; + hw->wiphy->wowlan = &mvm->wowlan; } #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 8269bc562951..b62a948658ac 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -336,6 +336,7 @@ struct iwl_mvm { struct ieee80211_vif *p2p_device_vif; #ifdef CONFIG_PM_SLEEP + struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; #endif diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 
e42b266a023a..60077895adff 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2426,6 +2426,16 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { #endif }; +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support mwifiex_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT, + .n_patterns = MWIFIEX_MAX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, + .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, +}; +#endif + /* * This function registers the device with CFG802.11 subsystem. * @@ -2483,11 +2493,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom); #ifdef CONFIG_PM - wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT; - wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS; - wiphy->wowlan.pattern_min_len = 1; - wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN; - wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN; + wiphy->wowlan = &mwifiex_wowlan_support; #endif wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 953111a502ee..796928ba875f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6018,6 +6018,15 @@ int wlcore_free_hw(struct wl1271 *wl) } EXPORT_SYMBOL_GPL(wlcore_free_hw); +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support wlcore_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY, + .n_patterns = WL1271_MAX_RX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE, +}; +#endif + static void wlcore_nvs_cb(const struct firmware *fw, void *context) { struct wl1271 *wl = context; @@ -6071,14 +6080,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) if (!ret) { wl->irq_wake_enabled = true; device_init_wakeup(wl->dev, 1); - if (pdata->pwr_in_suspend) { - wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; - wl->hw->wiphy->wowlan.n_patterns = - WL1271_MAX_RX_FILTERS; - wl->hw->wiphy->wowlan.pattern_min_len = 1; - wl->hw->wiphy->wowlan.pattern_max_len = - WL1271_RX_FILTER_MAX_PATTERN_SIZE; - } + if (pdata->pwr_in_suspend) + wl->hw->wiphy->wowlan = &wlcore_wowlan_support; } #endif disable_irq(wl->irq); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 6dd19593e333..6169fca216b4 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2654,7 +2654,7 @@ struct wiphy { u32 hw_version; #ifdef CONFIG_PM - struct wiphy_wowlan_support wowlan; + const struct wiphy_wowlan_support *wowlan; struct cfg80211_wowlan *wowlan_config; #endif diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 1998f1475267..626c83c042d7 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -686,8 +686,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) return -EINVAL; #ifdef CONFIG_PM - if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) && - (!local->ops->suspend || !local->ops->resume)) + if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume)) return -EINVAL; #endif diff --git a/net/wireless/core.c b/net/wireless/core.c index 41cec1776f4f..f553b9484c1e 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -449,8 +449,13 @@ int wiphy_register(struct wiphy *wiphy) u16 ifmodes = wiphy->interface_modes; #ifdef CONFIG_PM - if (WARN_ON((wiphy->wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && - !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) + if 
(WARN_ON(wiphy->wowlan && + (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && + !(wiphy->wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) + return -EINVAL; + if (WARN_ON(wiphy->wowlan && + !wiphy->wowlan->flags && !wiphy->wowlan->n_patterns && + !wiphy->wowlan->tcp)) return -EINVAL; #endif @@ -540,12 +545,11 @@ int wiphy_register(struct wiphy *wiphy) } #ifdef CONFIG_PM - if (rdev->wiphy.wowlan.n_patterns) { - if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len || - rdev->wiphy.wowlan.pattern_min_len > - rdev->wiphy.wowlan.pattern_max_len)) - return -EINVAL; - } + if (WARN_ON(rdev->wiphy.wowlan && rdev->wiphy.wowlan->n_patterns && + (!rdev->wiphy.wowlan->pattern_min_len || + rdev->wiphy.wowlan->pattern_min_len > + rdev->wiphy.wowlan->pattern_max_len))) + return -EINVAL; #endif /* check and set up bitrates */ diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 31d265f36d2c..7ee9af3283a8 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -908,7 +908,7 @@ nla_put_failure: static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { - const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp; + const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan->tcp; struct nlattr *nl_tcp; if (!tcp) @@ -951,37 +951,37 @@ static int nl80211_send_wowlan(struct sk_buff *msg, { struct nlattr *nl_wowlan; - if (!dev->wiphy.wowlan.flags && !dev->wiphy.wowlan.n_patterns) + if (!dev->wiphy.wowlan) return 0; nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); if (!nl_wowlan) return -ENOBUFS; - if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) && + if (((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) return -ENOBUFS; - if (dev->wiphy.wowlan.n_patterns) { + if (dev->wiphy.wowlan->n_patterns) { struct nl80211_wowlan_pattern_support pat = { - .max_patterns = dev->wiphy.wowlan.n_patterns, - .min_pattern_len = dev->wiphy.wowlan.pattern_min_len, - .max_pattern_len = dev->wiphy.wowlan.pattern_max_len, - .max_pkt_offset = dev->wiphy.wowlan.max_pkt_offset, + .max_patterns = dev->wiphy.wowlan->n_patterns, + .min_pattern_len = dev->wiphy.wowlan->pattern_min_len, + .max_pattern_len = 
dev->wiphy.wowlan->pattern_max_len, + .max_pkt_offset = dev->wiphy.wowlan->max_pkt_offset, }; if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, @@ -7580,8 +7580,7 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) void *hdr; u32 size = NLMSG_DEFAULT_SIZE; - if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns && - !rdev->wiphy.wowlan.tcp) + if (!rdev->wiphy.wowlan) return -EOPNOTSUPP; if (rdev->wiphy.wowlan_config && rdev->wiphy.wowlan_config->tcp) { @@ -7654,7 +7653,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, u32 data_size, wake_size, tokens_size = 0, wake_mask_size; int err, port; - if (!rdev->wiphy.wowlan.tcp) + if (!rdev->wiphy.wowlan->tcp) return -EINVAL; err = nla_parse(tb, MAX_NL80211_WOWLAN_TCP, @@ -7674,16 +7673,16 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, return -EINVAL; data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]); - if (data_size > rdev->wiphy.wowlan.tcp->data_payload_max) + if (data_size > rdev->wiphy.wowlan->tcp->data_payload_max) return -EINVAL; if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) > - rdev->wiphy.wowlan.tcp->data_interval_max || + rdev->wiphy.wowlan->tcp->data_interval_max || nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0) return -EINVAL; wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]); - if (wake_size > rdev->wiphy.wowlan.tcp->wake_payload_max) + if (wake_size > rdev->wiphy.wowlan->tcp->wake_payload_max) return -EINVAL; wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]); @@ -7698,13 +7697,13 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, if (!tok->len || tokens_size % tok->len) return -EINVAL; - if (!rdev->wiphy.wowlan.tcp->tok) + if (!rdev->wiphy.wowlan->tcp->tok) return -EINVAL; - if (tok->len > rdev->wiphy.wowlan.tcp->tok->max_len) + if (tok->len > rdev->wiphy.wowlan->tcp->tok->max_len) return -EINVAL; - if (tok->len < rdev->wiphy.wowlan.tcp->tok->min_len) + if (tok->len < rdev->wiphy.wowlan->tcp->tok->min_len) return -EINVAL; - if (tokens_size > rdev->wiphy.wowlan.tcp->tok->bufsize) + if (tokens_size > rdev->wiphy.wowlan->tcp->tok->bufsize) return -EINVAL; if (tok->offset + tok->len > data_size) return -EINVAL; @@ -7712,7 +7711,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) { seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]); - if (!rdev->wiphy.wowlan.tcp->seq) + if (!rdev->wiphy.wowlan->tcp->seq) return -EINVAL; if (seq->len == 0 || seq->len > 4) return -EINVAL; @@ -7793,12 +7792,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; struct cfg80211_wowlan new_triggers = {}; struct cfg80211_wowlan *ntrig; - struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; + const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan; int err, i; bool prev_enabled = rdev->wiphy.wowlan_config; - if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns && - !rdev->wiphy.wowlan.tcp) + if (!wowlan) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) { -- cgit v1.2.3 From c992219825823cad5e11fab3854eb93df2e9ffa1 Mon Sep 17 00:00:00 2001 From: Solomon Peachy Date: Sun, 2 Jun 2013 09:53:01 -0400 Subject: cw1200: move platform_data header to correct location. (As suggested by Arnd Bergmann) Signed-off-by: Solomon Peachy Signed-off-by: John W. 
Linville --- drivers/net/wireless/cw1200/cw1200_sagrad.c | 2 +- drivers/net/wireless/cw1200/cw1200_sdio.c | 2 +- drivers/net/wireless/cw1200/cw1200_spi.c | 2 +- include/linux/cw1200_platform.h | 46 --------------------------- include/linux/platform_data/cw1200_platform.h | 46 +++++++++++++++++++++++++++ 5 files changed, 49 insertions(+), 49 deletions(-) delete mode 100644 include/linux/cw1200_platform.h create mode 100644 include/linux/platform_data/cw1200_platform.h (limited to 'include') diff --git a/drivers/net/wireless/cw1200/cw1200_sagrad.c b/drivers/net/wireless/cw1200/cw1200_sagrad.c index a5ada0eda1dd..14c2a186b493 100644 --- a/drivers/net/wireless/cw1200/cw1200_sagrad.c +++ b/drivers/net/wireless/cw1200/cw1200_sagrad.c @@ -10,7 +10,7 @@ */ #include -#include +#include MODULE_AUTHOR("Solomon Peachy "); MODULE_DESCRIPTION("ST-Ericsson CW1200 Platform glue driver"); diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c index 78c3bc55cd0b..9f25ec5641fe 100644 --- a/drivers/net/wireless/cw1200/cw1200_sdio.c +++ b/drivers/net/wireless/cw1200/cw1200_sdio.c @@ -20,7 +20,7 @@ #include "cw1200.h" #include "hwbus.h" -#include +#include #include "hwio.h" MODULE_AUTHOR("Dmitry Tarnyagin "); diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c index 75efe54e495f..940e0947a5dc 100644 --- a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -25,7 +25,7 @@ #include "cw1200.h" #include "hwbus.h" -#include +#include #include "hwio.h" MODULE_AUTHOR("Solomon Peachy "); diff --git a/include/linux/cw1200_platform.h b/include/linux/cw1200_platform.h deleted file mode 100644 index c168fa512347..000000000000 --- a/include/linux/cw1200_platform.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (C) ST-Ericsson SA 2011 - * - * Author: Dmitry Tarnyagin - * License terms: GNU General Public License (GPL) version 2 - */ - -#ifndef CW1200_PLAT_H_INCLUDED -#define CW1200_PLAT_H_INCLUDED - -struct cw1200_platform_data_spi { - u8 spi_bits_per_word; /* REQUIRED */ - u16 ref_clk; /* REQUIRED (in KHz) */ - - /* All others are optional */ - bool have_5ghz; - const struct resource *reset; /* GPIO to RSTn signal */ - const struct resource *powerup; /* GPIO to POWERUP signal */ - int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata, - bool enable); /* Control 3v3 / 1v8 supply */ - int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, - bool enable); /* Control CLK32K */ - const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ - const char *sdd_file; /* if NULL, will use default for detected hw type */ -}; - -struct cw1200_platform_data_sdio { - u16 ref_clk; /* REQUIRED (in KHz) */ - - /* All others are optional */ - const struct resource *irq; /* if using GPIO for IRQ */ - bool have_5ghz; - bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ - const struct resource *reset; /* GPIO to RSTn signal */ - const struct resource *powerup; /* GPIO to POWERUP signal */ - int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, - bool enable); /* Control 3v3 / 1v8 supply */ - int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, - bool enable); /* Control CLK32K */ - const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ - const char *sdd_file; /* if NULL, will use default for detected hw type */ -}; - -const void *cw1200_get_platform_data(void); - -#endif /* CW1200_PLAT_H_INCLUDED */ diff --git 
a/include/linux/platform_data/cw1200_platform.h b/include/linux/platform_data/cw1200_platform.h new file mode 100644 index 000000000000..c168fa512347 --- /dev/null +++ b/include/linux/platform_data/cw1200_platform.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Dmitry Tarnyagin + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CW1200_PLAT_H_INCLUDED +#define CW1200_PLAT_H_INCLUDED + +struct cw1200_platform_data_spi { + u8 spi_bits_per_word; /* REQUIRED */ + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + bool have_5ghz; + const struct resource *reset; /* GPIO to RSTn signal */ + const struct resource *powerup; /* GPIO to POWERUP signal */ + int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + +struct cw1200_platform_data_sdio { + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + const struct resource *irq; /* if using GPIO for IRQ */ + bool have_5ghz; + bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ + const struct resource *reset; /* GPIO to RSTn signal */ + const struct resource *powerup; /* GPIO to POWERUP signal */ + int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + +const void *cw1200_get_platform_data(void); + +#endif /* CW1200_PLAT_H_INCLUDED */ -- cgit v1.2.3 From 6dd64a304eff76ca7dd41bf63df55efa965fa9ec Mon Sep 17 00:00:00 2001 From: Solomon Peachy Date: Sun, 2 Jun 2013 09:53:03 -0400 Subject: cw1200: Replace use of 'struct resource' with 'int' for GPIO fields. The only advantage of 'struct resource' is that it lets us assign names as part of the platform data. Unfortunately since we are using platform data, we are already limited to a single instance of each driver, rendering this moot. So, replace the struct resources with ints, resulting in cleaner code. This was based on a suggestion from Arnd Bergmann. Signed-off-by: Solomon Peachy Signed-off-by: John W. 
Linville --- drivers/net/wireless/cw1200/cw1200_sagrad.c | 49 +++----------------------- drivers/net/wireless/cw1200/cw1200_sdio.c | 50 ++++++++++++--------------- drivers/net/wireless/cw1200/cw1200_spi.c | 33 ++++++++---------- include/linux/platform_data/cw1200_platform.h | 12 +++---- 4 files changed, 47 insertions(+), 97 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/cw1200/cw1200_sagrad.c b/drivers/net/wireless/cw1200/cw1200_sagrad.c index 14c2a186b493..3f884ac96ccc 100644 --- a/drivers/net/wireless/cw1200/cw1200_sagrad.c +++ b/drivers/net/wireless/cw1200/cw1200_sagrad.c @@ -21,29 +21,6 @@ MODULE_LICENSE("GPL"); /* #define SAGRAD_1091_1098_EVK_SPI */ #ifdef SAGRAD_1091_1098_EVK_SDIO -#if 0 -static struct resource cw1200_href_resources[] = { - { - .start = 215, /* fix me as appropriate */ - .end = 215, /* ditto */ - .flags = IORESOURCE_IO, - .name = "cw1200_wlan_reset", - }, - { - .start = 216, /* fix me as appropriate */ - .end = 216, /* ditto */ - .flags = IORESOURCE_IO, - .name = "cw1200_wlan_powerup", - }, - { - .start = NOMADIK_GPIO_TO_IRQ(216), /* fix me as appropriate */ - .end = NOMADIK_GPIO_TO_IRQ(216), /* ditto */ - .flags = IORESOURCE_IRQ, - .name = "cw1200_wlan_irq", - }, -}; -#endif - static int cw1200_power_ctrl(const struct cw1200_platform_data_sdio *pdata, bool enable) { @@ -68,9 +45,9 @@ static struct cw1200_platform_data_sdio cw1200_platform_data = { .ref_clk = 38400, .have_5ghz = false, #if 0 - .reset = &cw1200_href_resources[0], - .powerup = &cw1200_href_resources[1], - .irq = &cw1200_href_resources[2], + .reset = GPIO_RF_RESET, /* Replace as appropriate */ + .powerup = GPIO_RF_POWERUP, /* Replace as appropriate */ + .irq = GPIO_TO_IRQ(GPIO_RF_IRQ), /* Replace as appropriate */ #endif .power_ctrl = cw1200_power_ctrl, .clk_ctrl = cw1200_clk_ctrl, @@ -80,22 +57,6 @@ static struct cw1200_platform_data_sdio cw1200_platform_data = { #endif #ifdef SAGRAD_1091_1098_EVK_SPI -/* Note that this is an example of integrating into your board support file */ -static struct resource cw1200_href_resources[] = { - { - .start = GPIO_RF_RESET, - .end = GPIO_RF_RESET, - .flags = IORESOURCE_IO, - .name = "cw1200_wlan_reset", - }, - { - .start = GPIO_RF_POWERUP, - .end = GPIO_RF_POWERUP, - .flags = IORESOURCE_IO, - .name = "cw1200_wlan_powerup", - }, -}; - static int cw1200_power_ctrl(const struct cw1200_platform_data_spi *pdata, bool enable) { @@ -118,8 +79,8 @@ static int cw1200_clk_ctrl(const struct cw1200_platform_data_spi *pdata, static struct cw1200_platform_data_spi cw1200_platform_data = { .ref_clk = 38400, .spi_bits_per_word = 16, - .reset = &cw1200_href_resources[0], - .powerup = &cw1200_href_resources[1], + .reset = GPIO_RF_RESET, /* Replace as appropriate */ + .powerup = GPIO_RF_POWERUP, /* Replace as appropriate */ .power_ctrl = cw1200_power_ctrl, .clk_ctrl = cw1200_clk_ctrl, /* .macaddr = ??? 
*/ diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c index 0a882ca89bbb..574cf727567c 100644 --- a/drivers/net/wireless/cw1200/cw1200_sdio.c +++ b/drivers/net/wireless/cw1200/cw1200_sdio.c @@ -105,7 +105,6 @@ static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id) static int cw1200_request_irq(struct hwbus_priv *self) { int ret; - const struct resource *irq = self->pdata->irq; u8 cccr; cccr = sdio_f0_readb(self->func, SDIO_CCCR_IENx, &ret); @@ -122,15 +121,15 @@ static int cw1200_request_irq(struct hwbus_priv *self) if (WARN_ON(ret)) goto err; - ret = enable_irq_wake(irq->start); + ret = enable_irq_wake(self->pdata->irq); if (WARN_ON(ret)) goto err; /* Request the IRQ */ - ret = request_threaded_irq(irq->start, cw1200_gpio_hardirq, + ret = request_threaded_irq(self->pdata->irq, cw1200_gpio_hardirq, cw1200_gpio_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, - irq->name, self); + "cw1200_wlan_irq", self); if (WARN_ON(ret)) goto err; @@ -162,8 +161,8 @@ static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self) pr_debug("SW IRQ unsubscribe\n"); if (self->pdata->irq) { - disable_irq_wake(self->pdata->irq->start); - free_irq(self->pdata->irq->start, self); + disable_irq_wake(self->pdata->irq); + free_irq(self->pdata->irq, self); } else { sdio_claim_host(self->func); ret = sdio_release_irq(self->func); @@ -174,12 +173,10 @@ static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self) static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata) { - const struct resource *reset = pdata->reset; - - if (reset) { - gpio_set_value(reset->start, 0); + if (pdata->reset) { + gpio_set_value(pdata->reset, 0); msleep(30); /* Min is 2 * CLK32K cycles */ - gpio_free(reset->start); + gpio_free(pdata->reset); } if (pdata->power_ctrl) @@ -192,20 +189,17 @@ static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata) static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata) { - const struct resource *reset = pdata->reset; - const struct resource *powerup = pdata->powerup; - /* Ensure I/Os are pulled low */ - if (reset) { - gpio_request(reset->start, reset->name); - gpio_direction_output(reset->start, 0); + if (pdata->reset) { + gpio_request(pdata->reset, "cw1200_wlan_reset"); + gpio_direction_output(pdata->reset, 0); } - if (powerup) { - gpio_request(powerup->start, powerup->name); - gpio_direction_output(powerup->start, 0); + if (pdata->powerup) { + gpio_request(pdata->powerup, "cw1200_wlan_powerup"); + gpio_direction_output(pdata->powerup, 0); } - if (reset || powerup) - msleep(50); /* Settle time */ + if (pdata->reset || pdata->powerup) + msleep(10); /* Settle time? */ /* Enable 3v3 and 1v8 to hardware */ if (pdata->power_ctrl) { @@ -225,13 +219,13 @@ static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata) } /* Enable POWERUP signal */ - if (powerup) { - gpio_set_value(powerup->start, 1); + if (pdata->powerup) { + gpio_set_value(pdata->powerup, 1); msleep(250); /* or more..? */ } /* Enable RSTn signal */ - if (reset) { - gpio_set_value(reset->start, 1); + if (pdata->reset) { + gpio_set_value(pdata->reset, 1); msleep(50); /* Or more..? 
*/ } return 0; @@ -252,7 +246,7 @@ static int cw1200_sdio_pm(struct hwbus_priv *self, bool suspend) int ret = 0; if (self->pdata->irq) - ret = irq_set_irq_wake(self->pdata->irq->start, suspend); + ret = irq_set_irq_wake(self->pdata->irq, suspend); return ret; } @@ -274,7 +268,7 @@ static int cw1200_sdio_probe(struct sdio_func *func, pr_info("cw1200_wlan_sdio: Probe called\n"); - /* We are only able to handle the wlan function */ + /* We are only able to handle the wlan function */ if (func->num != 0x01) return -ENODEV; diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c index da5d24cc67db..d8dc7c6bb96d 100644 --- a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -268,12 +268,10 @@ static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) { - const struct resource *reset = pdata->reset; - - if (reset) { - gpio_set_value(reset->start, 0); + if (pdata->reset) { + gpio_set_value(pdata->reset, 0); msleep(30); /* Min is 2 * CLK32K cycles */ - gpio_free(reset->start); + gpio_free(pdata->reset); } if (pdata->power_ctrl) @@ -286,19 +284,16 @@ static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata) { - const struct resource *reset = pdata->reset; - const struct resource *powerup = pdata->powerup; - /* Ensure I/Os are pulled low */ - if (reset) { - gpio_request(reset->start, reset->name); - gpio_direction_output(reset->start, 0); + if (pdata->reset) { + gpio_request(pdata->reset, "cw1200_wlan_reset"); + gpio_direction_output(pdata->reset, 0); } - if (powerup) { - gpio_request(powerup->start, powerup->name); - gpio_direction_output(powerup->start, 0); + if (pdata->powerup) { + gpio_request(pdata->powerup, "cw1200_wlan_powerup"); + gpio_direction_output(pdata->powerup, 0); } - if (reset || powerup) + if (pdata->reset || pdata->powerup) msleep(10); /* Settle time? */ /* Enable 3v3 and 1v8 to hardware */ @@ -319,13 +314,13 @@ static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata) } /* Enable POWERUP signal */ - if (powerup) { - gpio_set_value(powerup->start, 1); + if (pdata->powerup) { + gpio_set_value(pdata->powerup, 1); msleep(250); /* or more..? */ } /* Enable RSTn signal */ - if (reset) { - gpio_set_value(reset->start, 1); + if (pdata->reset) { + gpio_set_value(pdata->reset, 1); msleep(50); /* Or more..? 
*/ } return 0; diff --git a/include/linux/platform_data/cw1200_platform.h b/include/linux/platform_data/cw1200_platform.h index c168fa512347..4454c16ea3e5 100644 --- a/include/linux/platform_data/cw1200_platform.h +++ b/include/linux/platform_data/cw1200_platform.h @@ -14,8 +14,8 @@ struct cw1200_platform_data_spi { /* All others are optional */ bool have_5ghz; - const struct resource *reset; /* GPIO to RSTn signal */ - const struct resource *powerup; /* GPIO to POWERUP signal */ + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata, bool enable); /* Control 3v3 / 1v8 supply */ int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, @@ -28,11 +28,11 @@ struct cw1200_platform_data_sdio { u16 ref_clk; /* REQUIRED (in KHz) */ /* All others are optional */ - const struct resource *irq; /* if using GPIO for IRQ */ bool have_5ghz; - bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ - const struct resource *reset; /* GPIO to RSTn signal */ - const struct resource *powerup; /* GPIO to POWERUP signal */ + bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ + int irq; /* IRQ line or 0 to use SDIO IRQ */ int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, bool enable); /* Control 3v3 / 1v8 supply */ int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, -- cgit v1.2.3 From 7c0b6f49dbea0b09b1de8aa5c942af9c2ad8b54c Mon Sep 17 00:00:00 2001 From: Solomon Peachy Date: Sun, 2 Jun 2013 11:35:31 -0400 Subject: cw1200: Rework SDIO platform support to prevent build problems. Based on discussions with And Bergmann, this patch changes the SDIO platform code to default to supporting the Sagrad devices, allowing for it to be overridden in board setup code. This renders the cw1200_sagrad module suplerflous, so it is now removed. It also moves the documentation that was in the cw1200_sagrad source to the platform header. Signed-off-by: Solomon Peachy Signed-off-by: John W. Linville --- drivers/net/wireless/cw1200/Kconfig | 17 ++--- drivers/net/wireless/cw1200/Makefile | 2 - drivers/net/wireless/cw1200/cw1200_sagrad.c | 106 -------------------------- drivers/net/wireless/cw1200/cw1200_sdio.c | 26 ++++++- include/linux/platform_data/cw1200_platform.h | 37 ++++++++- 5 files changed, 66 insertions(+), 122 deletions(-) delete mode 100644 drivers/net/wireless/cw1200/cw1200_sagrad.c (limited to 'include') diff --git a/drivers/net/wireless/cw1200/Kconfig b/drivers/net/wireless/cw1200/Kconfig index 13e36119ae9f..7a585d4e9c8e 100644 --- a/drivers/net/wireless/cw1200/Kconfig +++ b/drivers/net/wireless/cw1200/Kconfig @@ -13,20 +13,19 @@ config CW1200_WLAN_SDIO depends on CW1200 && MMC help Enable support for the CW1200 connected via an SDIO bus. + By default this driver only supports the Sagrad SG901-1091/1098 EVK + and similar designs that utilize a hardware reset circuit. To + support different CW1200 SDIO designs you will need to override + the default platform data by calling cw1200_sdio_set_platform_data() + in your board setup file. config CW1200_WLAN_SPI tristate "Support SPI platforms" depends on CW1200 && SPI help - Enables support for the CW1200 connected via a SPI bus. 
- -config CW1200_WLAN_SAGRAD - tristate "Support Sagrad SG901-1091/1098 modules" - depends on CW1200_WLAN_SDIO - help - This provides the platform data glue to support the - Sagrad SG901-1091/1098 modules in their standard SDIO EVK. - It also includes example SPI platform data. + Enables support for the CW1200 connected via a SPI bus. You will + need to add appropriate platform data glue in your board setup + file. menu "Driver debug features" depends on CW1200 && DEBUG_FS diff --git a/drivers/net/wireless/cw1200/Makefile b/drivers/net/wireless/cw1200/Makefile index 1aa3682066e0..bc6cbf91f26e 100644 --- a/drivers/net/wireless/cw1200/Makefile +++ b/drivers/net/wireless/cw1200/Makefile @@ -16,9 +16,7 @@ cw1200_core-$(CONFIG_PM) += pm.o cw1200_wlan_sdio-y := cw1200_sdio.o cw1200_wlan_spi-y := cw1200_spi.o -cw1200_wlan_sagrad-y := cw1200_sagrad.o obj-$(CONFIG_CW1200) += cw1200_core.o obj-$(CONFIG_CW1200_WLAN_SDIO) += cw1200_wlan_sdio.o obj-$(CONFIG_CW1200_WLAN_SPI) += cw1200_wlan_spi.o -obj-$(CONFIG_CW1200_WLAN_SAGRAD) += cw1200_wlan_sagrad.o diff --git a/drivers/net/wireless/cw1200/cw1200_sagrad.c b/drivers/net/wireless/cw1200/cw1200_sagrad.c deleted file mode 100644 index 3f884ac96ccc..000000000000 --- a/drivers/net/wireless/cw1200/cw1200_sagrad.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Platform glue data for ST-Ericsson CW1200 driver - * - * Copyright (c) 2013, Sagrad, Inc - * Author: Solomon Peachy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include - -MODULE_AUTHOR("Solomon Peachy "); -MODULE_DESCRIPTION("ST-Ericsson CW1200 Platform glue driver"); -MODULE_LICENSE("GPL"); - -/* Define just one of these. Feel free to customize as needed */ -#define SAGRAD_1091_1098_EVK_SDIO -/* #define SAGRAD_1091_1098_EVK_SPI */ - -#ifdef SAGRAD_1091_1098_EVK_SDIO -static int cw1200_power_ctrl(const struct cw1200_platform_data_sdio *pdata, - bool enable) -{ - /* Control 3v3 and 1v8 to hardware as appropriate */ - /* Note this is not needed if it's controlled elsewhere or always on */ - - /* May require delay for power to stabilize */ - return 0; -} - -static int cw1200_clk_ctrl(const struct cw1200_platform_data_sdio *pdata, - bool enable) -{ - /* Turn CLK_32K off and on as appropriate. */ - /* Note this is not needed if it's always on */ - - /* May require delay for clock to stabilize */ - return 0; -} - -static struct cw1200_platform_data_sdio cw1200_platform_data = { - .ref_clk = 38400, - .have_5ghz = false, -#if 0 - .reset = GPIO_RF_RESET, /* Replace as appropriate */ - .powerup = GPIO_RF_POWERUP, /* Replace as appropriate */ - .irq = GPIO_TO_IRQ(GPIO_RF_IRQ), /* Replace as appropriate */ -#endif - .power_ctrl = cw1200_power_ctrl, - .clk_ctrl = cw1200_clk_ctrl, -/* .macaddr = ??? */ - .sdd_file = "sdd_sagrad_1091_1098.bin", -}; -#endif - -#ifdef SAGRAD_1091_1098_EVK_SPI -static int cw1200_power_ctrl(const struct cw1200_platform_data_spi *pdata, - bool enable) -{ - /* Control 3v3 and 1v8 to hardware as appropriate */ - /* Note this is not needed if it's controlled elsewhere or always on */ - - /* May require delay for power to stabilize */ - return 0; -} -static int cw1200_clk_ctrl(const struct cw1200_platform_data_spi *pdata, - bool enable) -{ - /* Turn CLK_32K off and on as appropriate. 
*/ - /* Note this is not needed if it's always on */ - - /* May require delay for clock to stabilize */ - return 0; -} - -static struct cw1200_platform_data_spi cw1200_platform_data = { - .ref_clk = 38400, - .spi_bits_per_word = 16, - .reset = GPIO_RF_RESET, /* Replace as appropriate */ - .powerup = GPIO_RF_POWERUP, /* Replace as appropriate */ - .power_ctrl = cw1200_power_ctrl, - .clk_ctrl = cw1200_clk_ctrl, -/* .macaddr = ??? */ - .sdd_file = "sdd_sagrad_1091_1098.bin", -}; -static struct spi_board_info myboard_spi_devices[] __initdata = { - { - .modalias = "cw1200_wlan_spi", - .max_speed_hz = 10000000, /* 52MHz Max */ - .bus_num = 0, - .irq = WIFI_IRQ, - .platform_data = &cw1200_platform_data, - .chip_select = 0, - }, -}; -#endif - - -const void *cw1200_get_platform_data(void) -{ - return &cw1200_platform_data; -} -EXPORT_SYMBOL_GPL(cw1200_get_platform_data); diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c index 574cf727567c..4b3148e47ee7 100644 --- a/drivers/net/wireless/cw1200/cw1200_sdio.c +++ b/drivers/net/wireless/cw1200/cw1200_sdio.c @@ -29,6 +29,21 @@ MODULE_LICENSE("GPL"); #define SDIO_BLOCK_SIZE (512) +/* Default platform data for Sagrad modules */ +static struct cw1200_platform_data_sdio sagrad_109x_evk_platform_data = { + .ref_clk = 38400, + .have_5ghz = false, + .sdd_file = "sdd_sagrad_1091_1098.bin", +}; + +/* Allow platform data to be overridden */ +static struct cw1200_platform_data_sdio *global_plat_data = &sagrad_109x_evk_platform_data; + +void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata) +{ + global_plat_data = pdata; +} + struct hwbus_priv { struct sdio_func *func; struct cw1200_common *core; @@ -261,7 +276,7 @@ static struct hwbus_ops cw1200_sdio_hwbus_ops = { /* Probe Function to be called by SDIO stack when device is discovered */ static int cw1200_sdio_probe(struct sdio_func *func, - const struct sdio_device_id *id) + const struct sdio_device_id *id) { struct hwbus_priv *self; int status; @@ -280,7 +295,7 @@ static int cw1200_sdio_probe(struct sdio_func *func, func->card->quirks |= MMC_QUIRK_LENIENT_FN0; - self->pdata = cw1200_get_platform_data(); + self->pdata = global_plat_data; /* FIXME */ self->func = func; sdio_set_drvdata(func, self); sdio_claim_host(func); @@ -374,7 +389,8 @@ static int __init cw1200_sdio_init(void) const struct cw1200_platform_data_sdio *pdata; int ret; - pdata = cw1200_get_platform_data(); + /* FIXME -- this won't support multiple devices */ + pdata = global_plat_data; if (cw1200_sdio_on(pdata)) { ret = -1; @@ -396,7 +412,9 @@ err: static void __exit cw1200_sdio_exit(void) { const struct cw1200_platform_data_sdio *pdata; - pdata = cw1200_get_platform_data(); + + /* FIXME -- this won't support multiple devices */ + pdata = global_plat_data; sdio_unregister_driver(&sdio_driver); cw1200_sdio_off(pdata); } diff --git a/include/linux/platform_data/cw1200_platform.h b/include/linux/platform_data/cw1200_platform.h index 4454c16ea3e5..c6fbc3ce4ab0 100644 --- a/include/linux/platform_data/cw1200_platform.h +++ b/include/linux/platform_data/cw1200_platform.h @@ -41,6 +41,41 @@ struct cw1200_platform_data_sdio { const char *sdd_file; /* if NULL, will use default for detected hw type */ }; -const void *cw1200_get_platform_data(void); + +/* An example of SPI support in your board setup file: + + static struct cw1200_platform_data_spi cw1200_platform_data = { + .ref_clk = 38400, + .spi_bits_per_word = 16, + .reset = GPIO_RF_RESET, + .powerup = GPIO_RF_POWERUP, + 
.macaddr = wifi_mac_addr, + .sdd_file = "sdd_sagrad_1091_1098.bin", + }; + static struct spi_board_info myboard_spi_devices[] __initdata = { + { + .modalias = "cw1200_wlan_spi", + .max_speed_hz = 52000000, + .bus_num = 0, + .irq = WIFI_IRQ, + .platform_data = &cw1200_platform_data, + .chip_select = 0, + }, + }; + + */ + +/* An example of SDIO support in your board setup file: + + static struct cw1200_platform_data_sdio my_cw1200_platform_data = { + .ref_clk = 38400, + .have_5ghz = false, + .sdd_file = "sdd_myplatform.bin", + }; + cw1200_sdio_set_platform_data(&my_cw1200_platform_data); + + */ + +void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata); #endif /* CW1200_PLAT_H_INCLUDED */ -- cgit v1.2.3 From 4da2a54a842db6921289e3e25b0739531a594dea Mon Sep 17 00:00:00 2001 From: Solomon Peachy Date: Sun, 2 Jun 2013 11:35:32 -0400 Subject: cw1200: rename the cw1200 platform definition header My previous patch just moved the file, but it also needed to be renamed to conform to proper conventions. Signed-off-by: Solomon Peachy Signed-off-by: John W. Linville --- drivers/net/wireless/cw1200/cw1200_sdio.c | 2 +- drivers/net/wireless/cw1200/cw1200_spi.c | 2 +- include/linux/platform_data/cw1200_platform.h | 81 --------------------------- include/linux/platform_data/net-cw1200.h | 81 +++++++++++++++++++++++++++ 4 files changed, 83 insertions(+), 83 deletions(-) delete mode 100644 include/linux/platform_data/cw1200_platform.h create mode 100644 include/linux/platform_data/net-cw1200.h (limited to 'include') diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c index 4b3148e47ee7..bb1f405315e4 100644 --- a/drivers/net/wireless/cw1200/cw1200_sdio.c +++ b/drivers/net/wireless/cw1200/cw1200_sdio.c @@ -20,7 +20,7 @@ #include "cw1200.h" #include "hwbus.h" -#include +#include #include "hwio.h" MODULE_AUTHOR("Dmitry Tarnyagin "); diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c index d8dc7c6bb96d..e58f0a5bafa9 100644 --- a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -25,7 +25,7 @@ #include "cw1200.h" #include "hwbus.h" -#include +#include #include "hwio.h" MODULE_AUTHOR("Solomon Peachy "); diff --git a/include/linux/platform_data/cw1200_platform.h b/include/linux/platform_data/cw1200_platform.h deleted file mode 100644 index c6fbc3ce4ab0..000000000000 --- a/include/linux/platform_data/cw1200_platform.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) ST-Ericsson SA 2011 - * - * Author: Dmitry Tarnyagin - * License terms: GNU General Public License (GPL) version 2 - */ - -#ifndef CW1200_PLAT_H_INCLUDED -#define CW1200_PLAT_H_INCLUDED - -struct cw1200_platform_data_spi { - u8 spi_bits_per_word; /* REQUIRED */ - u16 ref_clk; /* REQUIRED (in KHz) */ - - /* All others are optional */ - bool have_5ghz; - int reset; /* GPIO to RSTn signal (0 disables) */ - int powerup; /* GPIO to POWERUP signal (0 disables) */ - int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata, - bool enable); /* Control 3v3 / 1v8 supply */ - int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, - bool enable); /* Control CLK32K */ - const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ - const char *sdd_file; /* if NULL, will use default for detected hw type */ -}; - -struct cw1200_platform_data_sdio { - u16 ref_clk; /* REQUIRED (in KHz) */ - - /* All others are optional */ - bool have_5ghz; - bool no_nptb; /* SDIO hardware does not 
support non-power-of-2-blocksizes */ - int reset; /* GPIO to RSTn signal (0 disables) */ - int powerup; /* GPIO to POWERUP signal (0 disables) */ - int irq; /* IRQ line or 0 to use SDIO IRQ */ - int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, - bool enable); /* Control 3v3 / 1v8 supply */ - int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, - bool enable); /* Control CLK32K */ - const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ - const char *sdd_file; /* if NULL, will use default for detected hw type */ -}; - - -/* An example of SPI support in your board setup file: - - static struct cw1200_platform_data_spi cw1200_platform_data = { - .ref_clk = 38400, - .spi_bits_per_word = 16, - .reset = GPIO_RF_RESET, - .powerup = GPIO_RF_POWERUP, - .macaddr = wifi_mac_addr, - .sdd_file = "sdd_sagrad_1091_1098.bin", - }; - static struct spi_board_info myboard_spi_devices[] __initdata = { - { - .modalias = "cw1200_wlan_spi", - .max_speed_hz = 52000000, - .bus_num = 0, - .irq = WIFI_IRQ, - .platform_data = &cw1200_platform_data, - .chip_select = 0, - }, - }; - - */ - -/* An example of SDIO support in your board setup file: - - static struct cw1200_platform_data_sdio my_cw1200_platform_data = { - .ref_clk = 38400, - .have_5ghz = false, - .sdd_file = "sdd_myplatform.bin", - }; - cw1200_sdio_set_platform_data(&my_cw1200_platform_data); - - */ - -void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata); - -#endif /* CW1200_PLAT_H_INCLUDED */ diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h new file mode 100644 index 000000000000..c6fbc3ce4ab0 --- /dev/null +++ b/include/linux/platform_data/net-cw1200.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Dmitry Tarnyagin + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CW1200_PLAT_H_INCLUDED +#define CW1200_PLAT_H_INCLUDED + +struct cw1200_platform_data_spi { + u8 spi_bits_per_word; /* REQUIRED */ + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + bool have_5ghz; + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ + int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + +struct cw1200_platform_data_sdio { + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + bool have_5ghz; + bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ + int irq; /* IRQ line or 0 to use SDIO IRQ */ + int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + + +/* An example of SPI support in your board setup file: + + static struct cw1200_platform_data_spi cw1200_platform_data = { + .ref_clk = 38400, + .spi_bits_per_word = 16, + .reset = GPIO_RF_RESET, + 
.powerup = GPIO_RF_POWERUP, + .macaddr = wifi_mac_addr, + .sdd_file = "sdd_sagrad_1091_1098.bin", + }; + static struct spi_board_info myboard_spi_devices[] __initdata = { + { + .modalias = "cw1200_wlan_spi", + .max_speed_hz = 52000000, + .bus_num = 0, + .irq = WIFI_IRQ, + .platform_data = &cw1200_platform_data, + .chip_select = 0, + }, + }; + + */ + +/* An example of SDIO support in your board setup file: + + static struct cw1200_platform_data_sdio my_cw1200_platform_data = { + .ref_clk = 38400, + .have_5ghz = false, + .sdd_file = "sdd_myplatform.bin", + }; + cw1200_sdio_set_platform_data(&my_cw1200_platform_data); + + */ + +void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata); + +#endif /* CW1200_PLAT_H_INCLUDED */ -- cgit v1.2.3 From d6d23de2786edca61fb9813ff7cdc7d2543d08a7 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 4 Jun 2013 12:15:42 +0200 Subject: mac80211: add a tx control flag to indicate PS-Poll/uAPSD response Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- include/net/mac80211.h | 3 +++ net/mac80211/sta_info.c | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 1f0014bd4d87..cb37f82d8d09 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -460,6 +460,8 @@ struct ieee80211_bss_conf { * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it * would be fragmented by size (this is optional, only used for * monitor injection). + * @IEEE80211_TX_CTL_PS_RESPONSE: This frame is a response to a poll + * frame (PS-Poll or uAPSD). * * Note: If you have to add new flags to the enumeration, then don't * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. @@ -495,6 +497,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_STATUS_EOSP = BIT(28), IEEE80211_TX_CTL_USE_MINRATE = BIT(29), IEEE80211_TX_CTL_DONTFRAG = BIT(30), + IEEE80211_TX_CTL_PS_RESPONSE = BIT(31), }; #define IEEE80211_TX_CTL_STBC_SHIFT 23 diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a04c5671d7fd..b4297982d34a 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1132,6 +1132,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, * ends the poll/service period. */ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_CTL_PS_RESPONSE | IEEE80211_TX_STATUS_EOSP | IEEE80211_TX_CTL_REQ_TX_STATUS; @@ -1269,7 +1270,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta, * STA may still remain is PS mode after this frame * exchange. */ - info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_CTL_PS_RESPONSE; /* * Use MoreData flag to indicate whether there are -- cgit v1.2.3 From 6ff57cf88807dd81300b5b9c623dc5eb6422b9f6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 16 May 2013 00:55:00 +0200 Subject: cfg80211/mac80211: clean up cfg80211 SME APIs Do some cleanups in the cfg80211 SME APIs, which are only used by mac80211. Most of these functions get a frame passed, and there isn't really any reason to export multiple functions as cfg80211 can check the frame type instead, do that. Additionally, the API functions have confusing names like cfg80211_send_...() which was meant to indicate that it sends an event to userspace, but gets a bit confusing when there's both TX and RX and they're not all clearly labeled. 
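For the drivers that implement the MLME commands (in practice only mac80211), the rework amounts to funneling every received and processed authentication, deauthentication and disassociation frame through a single notification call and letting cfg80211 dispatch on the frame type. The sketch below is illustrative only: the wrapper name is made up, while cfg80211_rx_mlme_mgmt() and the ieee80211_is_*() helpers are the real interfaces, and the caller must still hold the corresponding wdev mutex as documented.

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

/* Hypothetical wrapper, not part of the patch: report a fully processed
 * MLME frame to cfg80211.  Call with the corresponding wdev mutex held. */
static void example_rx_mlme(struct net_device *dev, const u8 *buf, size_t len)
{
	const struct ieee80211_mgmt *mgmt = (const void *)buf;

	if (len < 24)		/* need at least a full management header */
		return;

	/* auth, deauth and disassoc now share one entry point; cfg80211
	 * inspects the frame control field itself */
	if (ieee80211_is_auth(mgmt->frame_control) ||
	    ieee80211_is_deauth(mgmt->frame_control) ||
	    ieee80211_is_disassoc(mgmt->frame_control))
		cfg80211_rx_mlme_mgmt(dev, buf, len);
}

Robust frames that had to be dropped because they arrived unprotected are reported through cfg80211_rx_unprot_mlme_mgmt() instead, and locally generated deauth/disassoc frames go through cfg80211_tx_mlme_mgmt(), as the kerneldoc in the diff below spells out.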
Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 91 ++++++++++++++++++++------------------------------ net/mac80211/mlme.c | 44 ++++++++++++------------ net/mac80211/rx.c | 26 ++++++--------- net/wireless/mlme.c | 86 +++++++++++++++++++++++++++++------------------ net/wireless/nl80211.c | 30 ++++++++--------- net/wireless/trace.h | 46 ++++++++++++++++++------- 6 files changed, 170 insertions(+), 153 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 6169fca216b4..195330d4ef34 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3432,59 +3432,66 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); /** - * cfg80211_send_rx_auth - notification of processed authentication + * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame * @dev: network device * @buf: authentication frame (header + body) * @len: length of the frame data * - * This function is called whenever an authentication has been processed in - * station mode. The driver is required to call either this function or - * cfg80211_send_auth_timeout() to indicate the result of cfg80211_ops::auth() - * call. This function may sleep. The caller must hold the corresponding wdev's - * mutex. + * This function is called whenever an authentication, disassociation or + * deauthentication frame has been received and processed in station mode. + * After being asked to authenticate via cfg80211_ops::auth() the driver must + * call either this function or cfg80211_auth_timeout(). + * After being asked to associate via cfg80211_ops::assoc() the driver must + * call either this function or cfg80211_auth_timeout(). + * While connected, the driver must calls this for received and processed + * disassociation and deauthentication frames. If the frame couldn't be used + * because it was unprotected, the driver must call the function + * cfg80211_rx_unprot_mlme_mgmt() instead. + * + * This function may sleep. The caller must hold the corresponding wdev's mutex. */ -void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len); +void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** - * cfg80211_send_auth_timeout - notification of timed out authentication + * cfg80211_auth_timeout - notification of timed out authentication * @dev: network device * @addr: The MAC address of the device with which the authentication timed out * * This function may sleep. The caller must hold the corresponding wdev's * mutex. */ -void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr); +void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); /** - * cfg80211_send_rx_assoc - notification of processed association + * cfg80211_rx_assoc_resp - notification of processed association response * @dev: network device - * @bss: the BSS struct association was requested for, the struct reference - * is owned by cfg80211 after this call - * @buf: (re)association response frame (header + body) + * @bss: the BSS that association was requested with, ownership of the pointer + * moves to cfg80211 in this call + * @buf: authentication frame (header + body) * @len: length of the frame data * - * This function is called whenever a (re)association response has been - * processed in station mode. The driver is required to call either this - * function or cfg80211_send_assoc_timeout() to indicate the result of - * cfg80211_ops::assoc() call. 
This function may sleep. The caller must hold - * the corresponding wdev's mutex. + * After being asked to associate via cfg80211_ops::assoc() the driver must + * call either this function or cfg80211_auth_timeout(). + * + * This function may sleep. The caller must hold the corresponding wdev's mutex. */ -void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, +void cfg80211_rx_assoc_resp(struct net_device *dev, + struct cfg80211_bss *bss, const u8 *buf, size_t len); /** - * cfg80211_send_assoc_timeout - notification of timed out association + * cfg80211_assoc_timeout - notification of timed out association * @dev: network device * @addr: The MAC address of the device with which the association timed out * * This function may sleep. The caller must hold the corresponding wdev's mutex. */ -void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr); +void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr); /** - * cfg80211_send_deauth - notification of processed deauthentication + * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame * @dev: network device - * @buf: deauthentication frame (header + body) + * @buf: 802.11 frame (header + body) * @len: length of the frame data * * This function is called whenever deauthentication has been processed in @@ -3492,46 +3499,20 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr); * locally generated ones. This function may sleep. The caller must hold the * corresponding wdev's mutex. */ -void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len); +void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** - * cfg80211_send_disassoc - notification of processed disassociation - * @dev: network device - * @buf: disassociation response frame (header + body) - * @len: length of the frame data - * - * This function is called whenever disassociation has been processed in - * station mode. This includes both received disassociation frames and locally - * generated ones. This function may sleep. The caller must hold the - * corresponding wdev's mutex. - */ -void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len); - -/** - * cfg80211_send_unprot_deauth - notification of unprotected deauthentication + * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame * @dev: network device * @buf: deauthentication frame (header + body) * @len: length of the frame data * - * This function is called whenever a received Deauthentication frame has been - * dropped in station mode because of MFP being used but the Deauthentication - * frame was not protected. This function may sleep. - */ -void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf, - size_t len); - -/** - * cfg80211_send_unprot_disassoc - notification of unprotected disassociation - * @dev: network device - * @buf: disassociation frame (header + body) - * @len: length of the frame data - * - * This function is called whenever a received Disassociation frame has been - * dropped in station mode because of MFP being used but the Disassociation + * This function is called whenever a received deauthentication or dissassoc + * frame has been dropped in station mode because of MFP being used but the * frame was not protected. This function may sleep. 
*/ -void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf, - size_t len); +void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, + const u8 *buf, size_t len); /** * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9950e13f641b..df8170a80a56 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2155,7 +2155,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA); - cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); sdata_unlock(sdata); } @@ -2302,7 +2303,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, sdata_info(sdata, "%pM denied authentication (status %d)\n", mgmt->sa, status_code); ieee80211_destroy_auth_data(sdata, false); - cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); return; } @@ -2337,7 +2338,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, * Report auth frame to user space for processing since another * round of Authentication frames is still needed. */ - cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); return; } @@ -2354,7 +2355,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, } mutex_unlock(&sdata->local->sta_mtx); - cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); return; out_err: mutex_unlock(&sdata->local->sta_mtx); @@ -2387,7 +2388,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, 0, 0, false, NULL); - cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, len); + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); } @@ -2413,7 +2414,7 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, 0, 0, false, NULL); - cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, len); + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); } static void ieee80211_get_rates(struct ieee80211_supported_band *sband, @@ -2711,7 +2712,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, /* oops -- internal error -- send timeout for now */ ieee80211_destroy_assoc_data(sdata, false); cfg80211_put_bss(sdata->local->hw.wiphy, bss); - cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); + cfg80211_assoc_timeout(sdata->dev, mgmt->bssid); return; } sdata_info(sdata, "associated\n"); @@ -2724,7 +2725,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, ieee80211_destroy_assoc_data(sdata, true); } - cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, len); + cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len); } static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, @@ -3117,8 +3118,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, true, deauth_buf); - cfg80211_send_deauth(sdata->dev, deauth_buf, - sizeof(deauth_buf)); + cfg80211_tx_mlme_mgmt(sdata->dev, deauth_buf, + sizeof(deauth_buf)); return; } @@ -3236,7 +3237,8 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, tx, frame_buf); - 
cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); } static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) @@ -3427,7 +3429,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) ieee80211_destroy_auth_data(sdata, false); - cfg80211_send_auth_timeout(sdata->dev, bssid); + cfg80211_auth_timeout(sdata->dev, bssid); } } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) run_again(sdata, ifmgd->auth_data->timeout); @@ -3443,7 +3445,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) ieee80211_destroy_assoc_data(sdata, false); - cfg80211_send_assoc_timeout(sdata->dev, bssid); + cfg80211_assoc_timeout(sdata->dev, bssid); } } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) run_again(sdata, ifmgd->assoc_data->timeout); @@ -3992,8 +3994,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, WLAN_REASON_UNSPECIFIED, false, frame_buf); - cfg80211_send_deauth(sdata->dev, frame_buf, - sizeof(frame_buf)); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + sizeof(frame_buf)); } sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@ -4055,8 +4057,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, WLAN_REASON_UNSPECIFIED, false, frame_buf); - cfg80211_send_deauth(sdata->dev, frame_buf, - sizeof(frame_buf)); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + sizeof(frame_buf)); } if (ifmgd->auth_data && !ifmgd->auth_data->done) { @@ -4309,8 +4311,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, out: if (report_frame) - cfg80211_send_deauth(sdata->dev, frame_buf, - IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); return 0; } @@ -4340,8 +4342,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, req->reason_code, !req->local_state_change, frame_buf); - cfg80211_send_disassoc(sdata->dev, frame_buf, - IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); return 0; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bdd7b4a719e9..23dbcfc69b3b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1747,27 +1747,21 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) if (unlikely(!ieee80211_has_protected(fc) && ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && rx->key)) { - if (ieee80211_is_deauth(fc)) - cfg80211_send_unprot_deauth(rx->sdata->dev, - rx->skb->data, - rx->skb->len); - else if (ieee80211_is_disassoc(fc)) - cfg80211_send_unprot_disassoc(rx->sdata->dev, - rx->skb->data, - rx->skb->len); + if (ieee80211_is_deauth(fc) || + ieee80211_is_disassoc(fc)) + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + rx->skb->data, + rx->skb->len); return -EACCES; } /* BIP does not use Protected field, so need to check MMIE */ if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && ieee80211_get_mmie_keyidx(rx->skb) < 0)) { - if (ieee80211_is_deauth(fc)) - cfg80211_send_unprot_deauth(rx->sdata->dev, - rx->skb->data, - rx->skb->len); - else if (ieee80211_is_disassoc(fc)) - cfg80211_send_unprot_disassoc(rx->sdata->dev, - rx->skb->data, - rx->skb->len); + if (ieee80211_is_deauth(fc) || + ieee80211_is_disassoc(fc)) + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + rx->skb->data, + rx->skb->len); return -EACCES; } /* diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 7bde5d9c0003..4b9c2be0d56d 100644 --- a/net/wireless/mlme.c +++ 
b/net/wireless/mlme.c @@ -18,20 +18,7 @@ #include "rdev-ops.h" -void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); - - trace_cfg80211_send_rx_auth(dev); - - nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); - cfg80211_sme_rx_auth(dev, buf, len); -} -EXPORT_SYMBOL(cfg80211_send_rx_auth); - -void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, +void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, const u8 *buf, size_t len) { u16 status_code; @@ -84,10 +71,10 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, status_code, status_code == WLAN_STATUS_SUCCESS, bss); } -EXPORT_SYMBOL(cfg80211_send_rx_assoc); +EXPORT_SYMBOL(cfg80211_rx_assoc_resp); -void cfg80211_send_deauth(struct net_device *dev, - const u8 *buf, size_t len) +static void cfg80211_process_deauth(struct net_device *dev, + const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; @@ -96,9 +83,6 @@ void cfg80211_send_deauth(struct net_device *dev, const u8 *bssid = mgmt->bssid; bool was_current = false; - trace_cfg80211_send_deauth(dev); - ASSERT_WDEV_LOCK(wdev); - if (wdev->current_bss && ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { cfg80211_unhold_bss(wdev->current_bss); @@ -123,10 +107,9 @@ void cfg80211_send_deauth(struct net_device *dev, false, NULL); } } -EXPORT_SYMBOL(cfg80211_send_deauth); -void cfg80211_send_disassoc(struct net_device *dev, - const u8 *buf, size_t len) +static void cfg80211_process_disassoc(struct net_device *dev, + const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; @@ -136,9 +119,6 @@ void cfg80211_send_disassoc(struct net_device *dev, u16 reason_code; bool from_ap; - trace_cfg80211_send_disassoc(dev); - ASSERT_WDEV_LOCK(wdev); - nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL); if (wdev->sme_state != CFG80211_SME_CONNECTED) @@ -153,15 +133,38 @@ void cfg80211_send_disassoc(struct net_device *dev, } else WARN_ON(1); - reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); } -EXPORT_SYMBOL(cfg80211_send_disassoc); -void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) +void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct ieee80211_mgmt *mgmt = (void *)buf; + + ASSERT_WDEV_LOCK(wdev); + + trace_cfg80211_rx_mlme_mgmt(dev, buf, len); + + if (WARN_ON(len < 2)) + return; + + if (ieee80211_is_auth(mgmt->frame_control)) { + nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); + cfg80211_sme_rx_auth(dev, buf, len); + } else if (ieee80211_is_deauth(mgmt->frame_control)) { + cfg80211_process_deauth(dev, buf, len); + } else if (ieee80211_is_disassoc(mgmt->frame_control)) { + cfg80211_process_disassoc(dev, buf, len); + } +} +EXPORT_SYMBOL(cfg80211_rx_mlme_mgmt); + +void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; @@ -175,9 +178,9 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) 
WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); } -EXPORT_SYMBOL(cfg80211_send_auth_timeout); +EXPORT_SYMBOL(cfg80211_auth_timeout); -void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) +void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; @@ -191,7 +194,26 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); } -EXPORT_SYMBOL(cfg80211_send_assoc_timeout); +EXPORT_SYMBOL(cfg80211_assoc_timeout); + +void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct ieee80211_mgmt *mgmt = (void *)buf; + + ASSERT_WDEV_LOCK(wdev); + + trace_cfg80211_tx_mlme_mgmt(dev, buf, len); + + if (WARN_ON(len < 2)) + return; + + if (ieee80211_is_deauth(mgmt->frame_control)) + cfg80211_process_deauth(dev, buf, len); + else + cfg80211_process_disassoc(dev, buf, len); +} +EXPORT_SYMBOL(cfg80211_tx_mlme_mgmt); void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, enum nl80211_key_type key_type, int key_id, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ce949e38178c..444f5effb77f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -9313,31 +9313,27 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, NL80211_CMD_DISASSOCIATE, gfp); } -void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf, - size_t len) +void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, + size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + const struct ieee80211_mgmt *mgmt = (void *)buf; + u32 cmd; - trace_cfg80211_send_unprot_deauth(dev); - nl80211_send_mlme_event(rdev, dev, buf, len, - NL80211_CMD_UNPROT_DEAUTHENTICATE, GFP_ATOMIC); -} -EXPORT_SYMBOL(cfg80211_send_unprot_deauth); + if (WARN_ON(len < 2)) + return; -void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf, - size_t len) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + if (ieee80211_is_deauth(mgmt->frame_control)) + cmd = NL80211_CMD_UNPROT_DEAUTHENTICATE; + else + cmd = NL80211_CMD_UNPROT_DISASSOCIATE; - trace_cfg80211_send_unprot_disassoc(dev); - nl80211_send_mlme_event(rdev, dev, buf, len, - NL80211_CMD_UNPROT_DISASSOCIATE, GFP_ATOMIC); + trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len); + nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC); } -EXPORT_SYMBOL(cfg80211_send_unprot_disassoc); +EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt); static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, int cmd, diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 23fafeae8a10..e1534baf2ebb 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1911,24 +1911,46 @@ TRACE_EVENT(cfg80211_send_rx_assoc, NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG) ); -DEFINE_EVENT(netdev_evt_only, cfg80211_send_deauth, - TP_PROTO(struct net_device *netdev), - TP_ARGS(netdev) +DECLARE_EVENT_CLASS(netdev_frame_event, + TP_PROTO(struct net_device *netdev, const u8 *buf, int len), + TP_ARGS(netdev, buf, len), + TP_STRUCT__entry( + NETDEV_ENTRY + __dynamic_array(u8, frame, len) + ), + TP_fast_assign( + NETDEV_ASSIGN; + 
memcpy(__get_dynamic_array(frame), buf, len); + ), + TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", + NETDEV_PR_ARG, + le16_to_cpup((__le16 *)__get_dynamic_array(frame))) ); -DEFINE_EVENT(netdev_evt_only, cfg80211_send_disassoc, - TP_PROTO(struct net_device *netdev), - TP_ARGS(netdev) +DEFINE_EVENT(netdev_frame_event, cfg80211_rx_unprot_mlme_mgmt, + TP_PROTO(struct net_device *netdev, const u8 *buf, int len), + TP_ARGS(netdev, buf, len) ); -DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_deauth, - TP_PROTO(struct net_device *netdev), - TP_ARGS(netdev) +DEFINE_EVENT(netdev_frame_event, cfg80211_rx_mlme_mgmt, + TP_PROTO(struct net_device *netdev, const u8 *buf, int len), + TP_ARGS(netdev, buf, len) ); -DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_disassoc, - TP_PROTO(struct net_device *netdev), - TP_ARGS(netdev) +TRACE_EVENT(cfg80211_tx_mlme_mgmt, + TP_PROTO(struct net_device *netdev, const u8 *buf, int len), + TP_ARGS(netdev, buf, len), + TP_STRUCT__entry( + NETDEV_ENTRY + __dynamic_array(u8, frame, len) + ), + TP_fast_assign( + NETDEV_ASSIGN; + memcpy(__get_dynamic_array(frame), buf, len); + ), + TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", + NETDEV_PR_ARG, + le16_to_cpup((__le16 *)__get_dynamic_array(frame))) ); DECLARE_EVENT_CLASS(netdev_mac_evt, -- cgit v1.2.3 From ceca7b7121795ef81bd598a240d53a925662d0c1 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 16 May 2013 00:55:45 +0200 Subject: cfg80211: separate internal SME implementation The current internal SME implementation in cfg80211 is very mixed up with the MLME handling, which has been causing issues for a long time. There are three things that the implementation has to provide: * a basic SME implementation for nl80211's connect() call (for drivers implementing auth/assoc, which is really just mac80211) and wireless extensions * MLME events for the userspace SME * SME events (connected, disconnected etc.) for all different SME implementation possibilities (driver, cfg80211 and userspace) To achieve these goals it isn't necessary to track the software SME's connection status outside of it's state (which is the part that caused many issues.) Instead, track it only in the SME data (wdev->conn) and in the general case only track whether the wdev is connected or not (via wdev->current_bss.) Also separate the internal implementation to not have callbacks from the SME events, but rather call it from the API functions that the driver (or rather mac80211) calls. This separates the code better. 
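The practical effect outside the SME code can be sketched briefly. The helper name below is hypothetical and the check simply mirrors the nl80211_key_allowed() hunk in this patch: with the sme_state enum removed, the generic question "is this interface connected?" is answered by whether the wdev holds a current BSS, while the software SME keeps its own sub-state privately in wdev->conn.

#include <net/cfg80211.h>

/* Hypothetical helper, for illustration only: after this change,
 * "connected" for station-like interfaces just means that a current
 * BSS is held on the wdev. */
static bool example_wdev_connected(struct wireless_dev *wdev)
{
	return wdev->current_bss != NULL;
}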
Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 5 - net/wireless/core.c | 1 - net/wireless/core.h | 30 +-- net/wireless/ibss.c | 6 - net/wireless/mlme.c | 191 ++++++----------- net/wireless/nl80211.c | 5 +- net/wireless/sme.c | 542 +++++++++++++++++++++++------------------------- net/wireless/wext-sme.c | 8 +- 8 files changed, 335 insertions(+), 453 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 195330d4ef34..5b208064f1bd 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2898,11 +2898,6 @@ struct wireless_dev { /* currently used for IBSS and SME - might be rearranged later */ u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len, mesh_id_len, mesh_id_up_len; - enum { - CFG80211_SME_IDLE, - CFG80211_SME_CONNECTING, - CFG80211_SME_CONNECTED, - } sme_state; struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; diff --git a/net/wireless/core.c b/net/wireless/core.c index f553b9484c1e..221e76b53a97 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -821,7 +821,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, pr_err("failed to add phy80211 symlink to netdev!\n"); } wdev->netdev = dev; - wdev->sme_state = CFG80211_SME_IDLE; #ifdef CONFIG_CFG80211_WEXT wdev->wext.default_key = -1; wdev->wext.default_mgmt_key = -1; diff --git a/net/wireless/core.h b/net/wireless/core.h index a65eaf8a84c1..a6b45bf00f33 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -308,11 +308,6 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, bool local_state_change); void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, struct net_device *dev); -void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, - const u8 *req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, - u16 status, bool wextev, - struct cfg80211_bss *bss); int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid, u16 frame_type, const u8 *match_data, int match_len); @@ -328,12 +323,19 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, const struct ieee80211_vht_cap *vht_capa_mask); -/* SME */ +/* SME events */ int cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, struct cfg80211_cached_keys *connkeys, const u8 *prev_bssid); +void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, + u16 status, bool wextev, + struct cfg80211_bss *bss); +void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, + size_t ie_len, u16 reason, bool from_ap); int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev); @@ -344,21 +346,21 @@ void __cfg80211_roamed(struct wireless_dev *wdev, int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); +/* SME implementation */ void cfg80211_conn_work(struct work_struct *work); -void cfg80211_sme_failed_assoc(struct wireless_dev *wdev); -bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev); +void cfg80211_sme_scan_done(struct net_device *dev); +bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status); +void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len); +void cfg80211_sme_disassoc(struct wireless_dev *wdev); +void 
cfg80211_sme_deauth(struct wireless_dev *wdev); +void cfg80211_sme_auth_timeout(struct wireless_dev *wdev); +void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev); /* internal helpers */ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher); int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, struct key_params *params, int key_idx, bool pairwise, const u8 *mac_addr); -void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, - size_t ie_len, u16 reason, bool from_ap); -void cfg80211_sme_scan_done(struct net_device *dev); -void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len); -void cfg80211_sme_disassoc(struct net_device *dev, - struct cfg80211_internal_bss *bss); void __cfg80211_scan_done(struct work_struct *wk); void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); void __cfg80211_sched_scan_results(struct work_struct *wk); diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 5449c5a6de84..39bff7d36768 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -43,7 +43,6 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); - wdev->sme_state = CFG80211_SME_CONNECTED; cfg80211_upload_connect_keys(wdev); nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, @@ -64,8 +63,6 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) trace_cfg80211_ibss_joined(dev, bssid); - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); - ev = kzalloc(sizeof(*ev), gfp); if (!ev) return; @@ -120,7 +117,6 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, #ifdef CONFIG_CFG80211_WEXT wdev->wext.ibss.chandef = params->chandef; #endif - wdev->sme_state = CFG80211_SME_CONNECTING; err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan, params->channel_fixed @@ -134,7 +130,6 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, err = rdev_join_ibss(rdev, dev, params); if (err) { wdev->connect_keys = NULL; - wdev->sme_state = CFG80211_SME_IDLE; return err; } @@ -186,7 +181,6 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) } wdev->current_bss = NULL; - wdev->sme_state = CFG80211_SME_IDLE; wdev->ssid_len = 0; #ifdef CONFIG_CFG80211_WEXT if (!nowext) diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 4b9c2be0d56d..a61a44bc6cf0 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -21,129 +21,85 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, const u8 *buf, size_t len) { - u16 status_code; struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; u8 *ie = mgmt->u.assoc_resp.variable; int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); + u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); trace_cfg80211_send_rx_assoc(dev, bss); - status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); - /* * This is a bit of a hack, we don't notify userspace of * a (re-)association reply if we tried to send a reassoc * and got a reject -- we only try again with an assoc * frame instead of reassoc. 
*/ - if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && - cfg80211_sme_failed_reassoc(wdev)) { + if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) { cfg80211_put_bss(wiphy, bss); return; } nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); - - if (status_code != WLAN_STATUS_SUCCESS && wdev->conn) { - cfg80211_sme_failed_assoc(wdev); - /* - * do not call connect_result() now because the - * sme will schedule work that does it later. - */ - cfg80211_put_bss(wiphy, bss); - return; - } - - if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) { - /* - * This is for the userspace SME, the CONNECTING - * state will be changed to CONNECTED by - * __cfg80211_connect_result() below. - */ - wdev->sme_state = CFG80211_SME_CONNECTING; - } - - /* this consumes the bss reference */ + /* update current_bss etc., consumes the bss reference */ __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, status_code, status_code == WLAN_STATUS_SUCCESS, bss); } EXPORT_SYMBOL(cfg80211_rx_assoc_resp); -static void cfg80211_process_deauth(struct net_device *dev, +static void cfg80211_process_auth(struct wireless_dev *wdev, + const u8 *buf, size_t len) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + + nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL); + cfg80211_sme_rx_auth(wdev, buf, len); +} + +static void cfg80211_process_deauth(struct wireless_dev *wdev, const u8 *buf, size_t len) { - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; const u8 *bssid = mgmt->bssid; - bool was_current = false; - - if (wdev->current_bss && - ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { - cfg80211_unhold_bss(wdev->current_bss); - cfg80211_put_bss(wiphy, &wdev->current_bss->pub); - wdev->current_bss = NULL; - was_current = true; - } + u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); + bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); - nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL); + nl80211_send_deauth(rdev, wdev->netdev, buf, len, GFP_KERNEL); - if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) { - u16 reason_code; - bool from_ap; - - reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); + if (!wdev->current_bss || + !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) + return; - from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); - __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); - } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { - __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - false, NULL); - } + __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); + cfg80211_sme_deauth(wdev); } -static void cfg80211_process_disassoc(struct net_device *dev, +static void cfg80211_process_disassoc(struct wireless_dev *wdev, const u8 *buf, size_t len) { - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; const u8 *bssid = mgmt->bssid; - u16 reason_code; - bool from_ap; + u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); + bool from_ap = !ether_addr_equal(mgmt->sa, 
wdev->netdev->dev_addr); - nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL); + nl80211_send_disassoc(rdev, wdev->netdev, buf, len, GFP_KERNEL); - if (wdev->sme_state != CFG80211_SME_CONNECTED) + if (WARN_ON(!wdev->current_bss || + !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) return; - if (wdev->current_bss && - ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { - cfg80211_sme_disassoc(dev, wdev->current_bss); - cfg80211_unhold_bss(wdev->current_bss); - cfg80211_put_bss(wiphy, &wdev->current_bss->pub); - wdev->current_bss = NULL; - } else - WARN_ON(1); - - reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); - - from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); - __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); + __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); + cfg80211_sme_disassoc(wdev); } void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct ieee80211_mgmt *mgmt = (void *)buf; ASSERT_WDEV_LOCK(wdev); @@ -153,14 +109,12 @@ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) if (WARN_ON(len < 2)) return; - if (ieee80211_is_auth(mgmt->frame_control)) { - nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); - cfg80211_sme_rx_auth(dev, buf, len); - } else if (ieee80211_is_deauth(mgmt->frame_control)) { - cfg80211_process_deauth(dev, buf, len); - } else if (ieee80211_is_disassoc(mgmt->frame_control)) { - cfg80211_process_disassoc(dev, buf, len); - } + if (ieee80211_is_auth(mgmt->frame_control)) + cfg80211_process_auth(wdev, buf, len); + else if (ieee80211_is_deauth(mgmt->frame_control)) + cfg80211_process_deauth(wdev, buf, len); + else if (ieee80211_is_disassoc(mgmt->frame_control)) + cfg80211_process_disassoc(wdev, buf, len); } EXPORT_SYMBOL(cfg80211_rx_mlme_mgmt); @@ -173,10 +127,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr) trace_cfg80211_send_auth_timeout(dev, addr); nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); - if (wdev->sme_state == CFG80211_SME_CONNECTING) - __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - false, NULL); + cfg80211_sme_auth_timeout(wdev); } EXPORT_SYMBOL(cfg80211_auth_timeout); @@ -189,10 +140,7 @@ void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr) trace_cfg80211_send_assoc_timeout(dev, addr); nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); - if (wdev->sme_state == CFG80211_SME_CONNECTING) - __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - false, NULL); + cfg80211_sme_assoc_timeout(wdev); } EXPORT_SYMBOL(cfg80211_assoc_timeout); @@ -209,9 +157,9 @@ void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) return; if (ieee80211_is_deauth(mgmt->frame_control)) - cfg80211_process_deauth(dev, buf, len); + cfg80211_process_deauth(wdev, buf, len); else - cfg80211_process_disassoc(dev, buf, len); + cfg80211_process_disassoc(wdev, buf, len); } EXPORT_SYMBOL(cfg80211_tx_mlme_mgmt); @@ -336,21 +284,12 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; - bool was_connected = false; ASSERT_WDEV_LOCK(wdev); - if (wdev->current_bss && req->prev_bssid && - ether_addr_equal(wdev->current_bss->pub.bssid, req->prev_bssid)) { - /* - * Trying to reassociate: Allow this to 
proceed and let the old - * association to be dropped when the new one is completed. - */ - if (wdev->sme_state == CFG80211_SME_CONNECTED) { - was_connected = true; - wdev->sme_state = CFG80211_SME_CONNECTING; - } - } else if (wdev->current_bss) + if (wdev->current_bss && + (!req->prev_bssid || !ether_addr_equal(wdev->current_bss->pub.bssid, + req->prev_bssid))) return -EALREADY; cfg80211_oper_and_ht_capa(&req->ht_capa_mask, @@ -360,11 +299,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); - if (!req->bss) { - if (was_connected) - wdev->sme_state = CFG80211_SME_CONNECTED; + if (!req->bss) return -ENOENT; - } err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED); if (err) @@ -373,11 +309,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, err = rdev_assoc(rdev, dev, req); out: - if (err) { - if (was_connected) - wdev->sme_state = CFG80211_SME_CONNECTED; + if (err) cfg80211_put_bss(&rdev->wiphy, req->bss); - } return err; } @@ -398,8 +331,9 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, ASSERT_WDEV_LOCK(wdev); - if (local_state_change && (!wdev->current_bss || - !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) + if (local_state_change && + (!wdev->current_bss || + !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) return 0; return rdev_deauth(rdev, dev, &req); @@ -417,13 +351,11 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, .ie = ie, .ie_len = ie_len, }; + int err; ASSERT_WDEV_LOCK(wdev); - if (wdev->sme_state != CFG80211_SME_CONNECTED) - return -ENOTCONN; - - if (WARN(!wdev->current_bss, "sme_state=%d\n", wdev->sme_state)) + if (!wdev->current_bss) return -ENOTCONN; if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) @@ -431,7 +363,13 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, else return -ENOTCONN; - return rdev_disassoc(rdev, dev, &req); + err = rdev_disassoc(rdev, dev, &req); + if (err) + return err; + + /* driver should have reported the disassoc */ + WARN_ON(wdev->current_bss); + return 0; } void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, @@ -439,10 +377,6 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, { struct wireless_dev *wdev = dev->ieee80211_ptr; u8 bssid[ETH_ALEN]; - struct cfg80211_deauth_request req = { - .reason_code = WLAN_REASON_DEAUTH_LEAVING, - .bssid = bssid, - }; ASSERT_WDEV_LOCK(wdev); @@ -453,13 +387,8 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, return; memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); - rdev_deauth(rdev, dev, &req); - - if (wdev->current_bss) { - cfg80211_unhold_bss(wdev->current_bss); - cfg80211_put_bss(&rdev->wiphy, &wdev->current_bss->pub); - wdev->current_bss = NULL; - } + cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, + WLAN_REASON_DEAUTH_LEAVING, false); } struct cfg80211_mgmt_registration { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 444f5effb77f..88e820b73674 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -800,12 +800,9 @@ static int nl80211_key_allowed(struct wireless_dev *wdev) case NL80211_IFTYPE_MESH_POINT: break; case NL80211_IFTYPE_ADHOC: - if (!wdev->current_bss) - return -ENOLINK; - break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: - if (wdev->sme_state != CFG80211_SME_CONNECTED) + if (!wdev->current_bss) return -ENOLINK; break; default: diff --git 
a/net/wireless/sme.c b/net/wireless/sme.c index 81be95f3be74..ae7e2cbf45cb 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -1,5 +1,7 @@ /* - * SME code for cfg80211's connect emulation. + * SME code for cfg80211 + * both driver SME event handling and the SME implementation + * (for nl80211's connect() and wext) * * Copyright 2009 Johannes Berg * Copyright (C) 2009 Intel Corporation. All rights reserved. @@ -18,18 +20,24 @@ #include "reg.h" #include "rdev-ops.h" +/* + * Software SME in cfg80211, using auth/assoc/deauth calls to the + * driver. This is is for implementing nl80211's connect/disconnect + * and wireless extensions (if configured.) + */ + struct cfg80211_conn { struct cfg80211_connect_params params; /* these are sub-states of the _CONNECTING sme_state */ enum { - CFG80211_CONN_IDLE, CFG80211_CONN_SCANNING, CFG80211_CONN_SCAN_AGAIN, CFG80211_CONN_AUTHENTICATE_NEXT, CFG80211_CONN_AUTHENTICATING, CFG80211_CONN_ASSOCIATE_NEXT, CFG80211_CONN_ASSOCIATING, - CFG80211_CONN_DEAUTH_ASSOC_FAIL, + CFG80211_CONN_DEAUTH, + CFG80211_CONN_CONNECTED, } state; u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; u8 *ie; @@ -37,39 +45,16 @@ struct cfg80211_conn { bool auto_auth, prev_bssid_valid; }; -static bool cfg80211_is_all_idle(void) +static void cfg80211_sme_free(struct wireless_dev *wdev) { - struct cfg80211_registered_device *rdev; - struct wireless_dev *wdev; - bool is_all_idle = true; - - /* - * All devices must be idle as otherwise if you are actively - * scanning some new beacon hints could be learned and would - * count as new regulatory hints. - */ - list_for_each_entry(rdev, &cfg80211_rdev_list, list) { - list_for_each_entry(wdev, &rdev->wdev_list, list) { - wdev_lock(wdev); - if (wdev->sme_state != CFG80211_SME_IDLE) - is_all_idle = false; - wdev_unlock(wdev); - } - } - - return is_all_idle; -} + if (!wdev->conn) + return; -static void disconnect_work(struct work_struct *work) -{ - rtnl_lock(); - if (cfg80211_is_all_idle()) - regulatory_hint_disconnect(); - rtnl_unlock(); + kfree(wdev->conn->ie); + kfree(wdev->conn); + wdev->conn = NULL; } -static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); - static int cfg80211_conn_scan(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); @@ -164,6 +149,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) params = &wdev->conn->params; switch (wdev->conn->state) { + case CFG80211_CONN_SCANNING: + /* didn't find it during scan ... 
*/ + return -ENOENT; case CFG80211_CONN_SCAN_AGAIN: return cfg80211_conn_scan(wdev); case CFG80211_CONN_AUTHENTICATE_NEXT: @@ -200,12 +188,11 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) WLAN_REASON_DEAUTH_LEAVING, false); return err; - case CFG80211_CONN_DEAUTH_ASSOC_FAIL: + case CFG80211_CONN_DEAUTH: cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); - /* return an error so that we call __cfg80211_connect_result() */ - return -EINVAL; + return 0; default: return 0; } @@ -229,7 +216,8 @@ void cfg80211_conn_work(struct work_struct *work) wdev_unlock(wdev); continue; } - if (wdev->sme_state != CFG80211_SME_CONNECTING || !wdev->conn) { + if (!wdev->conn || + wdev->conn->state == CFG80211_CONN_CONNECTED) { wdev_unlock(wdev); continue; } @@ -237,12 +225,14 @@ void cfg80211_conn_work(struct work_struct *work) memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN); bssid = bssid_buf; } - if (cfg80211_conn_do_work(wdev)) + if (cfg80211_conn_do_work(wdev)) { __cfg80211_connect_result( wdev->netdev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); + cfg80211_sme_free(wdev); + } wdev_unlock(wdev); } @@ -286,9 +276,6 @@ static void __cfg80211_sme_scan_done(struct net_device *dev) ASSERT_WDEV_LOCK(wdev); - if (wdev->sme_state != CFG80211_SME_CONNECTING) - return; - if (!wdev->conn) return; @@ -297,20 +284,10 @@ static void __cfg80211_sme_scan_done(struct net_device *dev) return; bss = cfg80211_get_conn_bss(wdev); - if (bss) { + if (bss) cfg80211_put_bss(&rdev->wiphy, bss); - } else { - /* not found */ - if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) - schedule_work(&rdev->conn_work); - else - __cfg80211_connect_result( - wdev->netdev, - wdev->conn->params.bssid, - NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - false, NULL); - } + else + schedule_work(&rdev->conn_work); } void cfg80211_sme_scan_done(struct net_device *dev) @@ -322,10 +299,8 @@ void cfg80211_sme_scan_done(struct net_device *dev) wdev_unlock(wdev); } -void cfg80211_sme_rx_auth(struct net_device *dev, - const u8 *buf, size_t len) +void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) { - struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; @@ -333,11 +308,7 @@ void cfg80211_sme_rx_auth(struct net_device *dev, ASSERT_WDEV_LOCK(wdev); - /* should only RX auth frames when connecting */ - if (wdev->sme_state != CFG80211_SME_CONNECTING) - return; - - if (WARN_ON(!wdev->conn)) + if (!wdev->conn || wdev->conn->state == CFG80211_CONN_CONNECTED) return; if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && @@ -366,46 +337,226 @@ void cfg80211_sme_rx_auth(struct net_device *dev, wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); } else if (status_code != WLAN_STATUS_SUCCESS) { - __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, + __cfg80211_connect_result(wdev->netdev, mgmt->bssid, + NULL, 0, NULL, 0, status_code, false, NULL); - } else if (wdev->sme_state == CFG80211_SME_CONNECTING && - wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { + } else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); } } -bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev) +bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status) { - 
struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - if (WARN_ON(!wdev->conn)) + if (!wdev->conn) return false; - if (!wdev->conn->prev_bssid_valid) + if (status == WLAN_STATUS_SUCCESS) { + wdev->conn->state = CFG80211_CONN_CONNECTED; return false; + } - /* - * Some stupid APs don't accept reassoc, so we - * need to fall back to trying regular assoc. - */ - wdev->conn->prev_bssid_valid = false; - wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; + if (wdev->conn->prev_bssid_valid) { + /* + * Some stupid APs don't accept reassoc, so we + * need to fall back to trying regular assoc; + * return true so no event is sent to userspace. + */ + wdev->conn->prev_bssid_valid = false; + wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; + schedule_work(&rdev->conn_work); + return true; + } + + wdev->conn->state = CFG80211_CONN_DEAUTH; schedule_work(&rdev->conn_work); + return false; +} - return true; +void cfg80211_sme_deauth(struct wireless_dev *wdev) +{ + cfg80211_sme_free(wdev); } -void cfg80211_sme_failed_assoc(struct wireless_dev *wdev) +void cfg80211_sme_auth_timeout(struct wireless_dev *wdev) { - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + cfg80211_sme_free(wdev); +} - wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL; +void cfg80211_sme_disassoc(struct wireless_dev *wdev) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + + if (!wdev->conn) + return; + + wdev->conn->state = CFG80211_CONN_DEAUTH; schedule_work(&rdev->conn_work); } +void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev) +{ + cfg80211_sme_disassoc(wdev); +} + +static int cfg80211_sme_connect(struct wireless_dev *wdev, + struct cfg80211_connect_params *connect, + const u8 *prev_bssid) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_bss *bss; + int err; + + if (!rdev->ops->auth || !rdev->ops->assoc) + return -EOPNOTSUPP; + + if (wdev->current_bss) + return -EALREADY; + + if (WARN_ON(wdev->conn)) + return -EINPROGRESS; + + wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); + if (!wdev->conn) + return -ENOMEM; + + /* + * Copy all parameters, and treat explicitly IEs, BSSID, SSID. + */ + memcpy(&wdev->conn->params, connect, sizeof(*connect)); + if (connect->bssid) { + wdev->conn->params.bssid = wdev->conn->bssid; + memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); + } + + if (connect->ie) { + wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, + GFP_KERNEL); + wdev->conn->params.ie = wdev->conn->ie; + if (!wdev->conn->ie) { + kfree(wdev->conn); + wdev->conn = NULL; + return -ENOMEM; + } + } + + if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { + wdev->conn->auto_auth = true; + /* start with open system ... 
should mostly work */ + wdev->conn->params.auth_type = + NL80211_AUTHTYPE_OPEN_SYSTEM; + } else { + wdev->conn->auto_auth = false; + } + + wdev->conn->params.ssid = wdev->ssid; + wdev->conn->params.ssid_len = connect->ssid_len; + + /* see if we have the bss already */ + bss = cfg80211_get_conn_bss(wdev); + + if (prev_bssid) { + memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); + wdev->conn->prev_bssid_valid = true; + } + + /* we're good if we have a matching bss struct */ + if (bss) { + wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; + err = cfg80211_conn_do_work(wdev); + cfg80211_put_bss(wdev->wiphy, bss); + } else { + /* otherwise we'll need to scan for the AP first */ + err = cfg80211_conn_scan(wdev); + + /* + * If we can't scan right now, then we need to scan again + * after the current scan finished, since the parameters + * changed (unless we find a good AP anyway). + */ + if (err == -EBUSY) { + err = 0; + wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; + } + } + + if (err) + cfg80211_sme_free(wdev); + + return err; +} + +static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + int err; + + if (!wdev->conn) + return 0; + + if (!rdev->ops->deauth) + return -EOPNOTSUPP; + + if (wdev->conn->state == CFG80211_CONN_SCANNING || + wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) { + err = 0; + goto out; + } + + /* wdev->conn->params.bssid must be set if > SCANNING */ + err = cfg80211_mlme_deauth(rdev, wdev->netdev, + wdev->conn->params.bssid, + NULL, 0, reason, false); + out: + cfg80211_sme_free(wdev); + return err; +} + +/* + * code shared for in-device and software SME + */ + +static bool cfg80211_is_all_idle(void) +{ + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + bool is_all_idle = true; + + /* + * All devices must be idle as otherwise if you are actively + * scanning some new beacon hints could be learned and would + * count as new regulatory hints. 
+ */ + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + list_for_each_entry(wdev, &rdev->wdev_list, list) { + wdev_lock(wdev); + if (wdev->conn || wdev->current_bss) + is_all_idle = false; + wdev_unlock(wdev); + } + } + + return is_all_idle; +} + +static void disconnect_work(struct work_struct *work) +{ + rtnl_lock(); + if (cfg80211_is_all_idle()) + regulatory_hint_disconnect(); + rtnl_unlock(); +} + +static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); + + +/* + * API calls for drivers implementing connect/disconnect and + * SME event handling + */ + void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, @@ -424,9 +575,6 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; - if (wdev->sme_state != CFG80211_SME_CONNECTING) - return; - nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev, bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, @@ -463,15 +611,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, wdev->current_bss = NULL; } - if (wdev->conn) - wdev->conn->state = CFG80211_CONN_IDLE; - if (status != WLAN_STATUS_SUCCESS) { - wdev->sme_state = CFG80211_SME_IDLE; - if (wdev->conn) - kfree(wdev->conn->ie); - kfree(wdev->conn); - wdev->conn = NULL; kfree(wdev->connect_keys); wdev->connect_keys = NULL; wdev->ssid_len = 0; @@ -480,21 +620,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, } if (!bss) - bss = cfg80211_get_bss(wdev->wiphy, - wdev->conn ? wdev->conn->params.channel : - NULL, - bssid, + bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); - if (WARN_ON(!bss)) return; cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); - wdev->sme_state = CFG80211_SME_CONNECTED; cfg80211_upload_connect_keys(wdev); rcu_read_lock(); @@ -530,8 +665,6 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, struct cfg80211_event *ev; unsigned long flags; - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); - ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); if (!ev) return; @@ -572,13 +705,8 @@ void __cfg80211_roamed(struct wireless_dev *wdev, wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) goto out; - if (wdev->sme_state != CFG80211_SME_CONNECTED) - goto out; - - /* internal error -- how did we get to CONNECTED w/o BSS? 
*/ - if (WARN_ON(!wdev->current_bss)) { + if (WARN_ON(!wdev->current_bss)) goto out; - } cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); @@ -628,8 +756,6 @@ void cfg80211_roamed(struct net_device *dev, struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_bss *bss; - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); - bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); @@ -651,8 +777,6 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_event *ev; unsigned long flags; - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); - if (WARN_ON(!bss)) return; @@ -694,25 +818,14 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; - if (wdev->sme_state != CFG80211_SME_CONNECTED) - return; - if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); } wdev->current_bss = NULL; - wdev->sme_state = CFG80211_SME_IDLE; wdev->ssid_len = 0; - if (wdev->conn) { - kfree(wdev->conn->ie); - wdev->conn->ie = NULL; - kfree(wdev->conn); - wdev->conn = NULL; - } - nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); /* @@ -741,8 +854,6 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason, struct cfg80211_event *ev; unsigned long flags; - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); - ev = kzalloc(sizeof(*ev) + ie_len, gfp); if (!ev) return; @@ -760,6 +871,9 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason, } EXPORT_SYMBOL(cfg80211_disconnected); +/* + * API calls for nl80211/wext compatibility code + */ int cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, @@ -767,14 +881,10 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, const u8 *prev_bssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_bss *bss = NULL; int err; ASSERT_WDEV_LOCK(wdev); - if (wdev->sme_state != CFG80211_SME_IDLE) - return -EALREADY; - if (WARN_ON(wdev->connect_keys)) { kfree(wdev->connect_keys); wdev->connect_keys = NULL; @@ -810,105 +920,22 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, } } - if (!rdev->ops->connect) { - if (!rdev->ops->auth || !rdev->ops->assoc) - return -EOPNOTSUPP; - - if (WARN_ON(wdev->conn)) - return -EINPROGRESS; - - wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); - if (!wdev->conn) - return -ENOMEM; - - /* - * Copy all parameters, and treat explicitly IEs, BSSID, SSID. - */ - memcpy(&wdev->conn->params, connect, sizeof(*connect)); - if (connect->bssid) { - wdev->conn->params.bssid = wdev->conn->bssid; - memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); - } + wdev->connect_keys = connkeys; + memcpy(wdev->ssid, connect->ssid, connect->ssid_len); + wdev->ssid_len = connect->ssid_len; - if (connect->ie) { - wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, - GFP_KERNEL); - wdev->conn->params.ie = wdev->conn->ie; - if (!wdev->conn->ie) { - kfree(wdev->conn); - wdev->conn = NULL; - return -ENOMEM; - } - } - - if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { - wdev->conn->auto_auth = true; - /* start with open system ... 
should mostly work */ - wdev->conn->params.auth_type = - NL80211_AUTHTYPE_OPEN_SYSTEM; - } else { - wdev->conn->auto_auth = false; - } - - memcpy(wdev->ssid, connect->ssid, connect->ssid_len); - wdev->ssid_len = connect->ssid_len; - wdev->conn->params.ssid = wdev->ssid; - wdev->conn->params.ssid_len = connect->ssid_len; - - /* see if we have the bss already */ - bss = cfg80211_get_conn_bss(wdev); - - wdev->sme_state = CFG80211_SME_CONNECTING; - wdev->connect_keys = connkeys; - - if (prev_bssid) { - memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); - wdev->conn->prev_bssid_valid = true; - } - - /* we're good if we have a matching bss struct */ - if (bss) { - wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; - err = cfg80211_conn_do_work(wdev); - cfg80211_put_bss(wdev->wiphy, bss); - } else { - /* otherwise we'll need to scan for the AP first */ - err = cfg80211_conn_scan(wdev); - /* - * If we can't scan right now, then we need to scan again - * after the current scan finished, since the parameters - * changed (unless we find a good AP anyway). - */ - if (err == -EBUSY) { - err = 0; - wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; - } - } - if (err) { - kfree(wdev->conn->ie); - kfree(wdev->conn); - wdev->conn = NULL; - wdev->sme_state = CFG80211_SME_IDLE; - wdev->connect_keys = NULL; - wdev->ssid_len = 0; - } - - return err; - } else { - wdev->sme_state = CFG80211_SME_CONNECTING; - wdev->connect_keys = connkeys; + if (!rdev->ops->connect) + err = cfg80211_sme_connect(wdev, connect, prev_bssid); + else err = rdev_connect(rdev, dev, connect); - if (err) { - wdev->connect_keys = NULL; - wdev->sme_state = CFG80211_SME_IDLE; - return err; - } - memcpy(wdev->ssid, connect->ssid, connect->ssid_len); - wdev->ssid_len = connect->ssid_len; - - return 0; + if (err) { + wdev->connect_keys = NULL; + wdev->ssid_len = 0; + return err; } + + return 0; } int cfg80211_disconnect(struct cfg80211_registered_device *rdev, @@ -919,78 +946,17 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, ASSERT_WDEV_LOCK(wdev); - if (wdev->sme_state == CFG80211_SME_IDLE) - return -EINVAL; - kfree(wdev->connect_keys); wdev->connect_keys = NULL; - if (!rdev->ops->disconnect) { - if (!rdev->ops->deauth) - return -EOPNOTSUPP; - - /* was it connected by userspace SME? 
*/ - if (!wdev->conn) { - cfg80211_mlme_down(rdev, dev); - goto disconnect; - } - - if (wdev->sme_state == CFG80211_SME_CONNECTING && - (wdev->conn->state == CFG80211_CONN_SCANNING || - wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) { - wdev->sme_state = CFG80211_SME_IDLE; - kfree(wdev->conn->ie); - kfree(wdev->conn); - wdev->conn = NULL; - wdev->ssid_len = 0; - return 0; - } - - /* wdev->conn->params.bssid must be set if > SCANNING */ - err = cfg80211_mlme_deauth(rdev, dev, - wdev->conn->params.bssid, - NULL, 0, reason, false); - if (err) - return err; + if (wdev->conn) { + err = cfg80211_sme_disconnect(wdev, reason); + } else if (!rdev->ops->disconnect) { + cfg80211_mlme_down(rdev, dev); + err = 0; } else { err = rdev_disconnect(rdev, dev, reason); - if (err) - return err; } - disconnect: - if (wdev->sme_state == CFG80211_SME_CONNECTED) - __cfg80211_disconnected(dev, NULL, 0, 0, false); - else if (wdev->sme_state == CFG80211_SME_CONNECTING) - __cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - wextev, NULL); - - return 0; -} - -void cfg80211_sme_disassoc(struct net_device *dev, - struct cfg80211_internal_bss *bss) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - u8 bssid[ETH_ALEN]; - - ASSERT_WDEV_LOCK(wdev); - - if (!wdev->conn) - return; - - if (wdev->conn->state == CFG80211_CONN_IDLE) - return; - - /* - * Ok, so the association was made by this SME -- we don't - * want it any more so deauthenticate too. - */ - - memcpy(bssid, bss->pub.bssid, ETH_ALEN); - - cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, - WLAN_REASON_DEAUTH_LEAVING, false); + return err; } diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index a53f8404f451..14c9a2583ba0 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -89,7 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, wdev_lock(wdev); - if (wdev->sme_state != CFG80211_SME_IDLE) { + if (wdev->conn) { bool event = true; if (wdev->wext.connect.channel == chan) { @@ -188,7 +188,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, err = 0; - if (wdev->sme_state != CFG80211_SME_IDLE) { + if (wdev->conn) { bool event = true; if (wdev->wext.connect.ssid && len && @@ -277,7 +277,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, wdev_lock(wdev); - if (wdev->sme_state != CFG80211_SME_IDLE) { + if (wdev->conn) { err = 0; /* both automatic */ if (!bssid && !wdev->wext.connect.bssid) @@ -364,7 +364,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev, wdev->wext.ie = ie; wdev->wext.ie_len = ie_len; - if (wdev->sme_state != CFG80211_SME_IDLE) { + if (wdev->conn) { err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, false); if (err) -- cgit v1.2.3 From 75698b17ac3b681f4345a9be48b147381d17266d Mon Sep 17 00:00:00 2001 From: Lorenzo Colitti Date: Fri, 31 May 2013 15:05:47 +0000 Subject: Clean up indentation in net/ipv6/transp_v6.h Reduce the indentation of most of the functions and make it a bit more consistent. This allows longer function and arg names to be consistently indented without wrapping. Signed-off-by: Lorenzo Colitti Signed-off-by: David S. 
Miller --- include/net/transp_v6.h | 70 ++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index eb40e71ff2ee..2d9d0e3115b9 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -7,47 +7,47 @@ * IPv6 transport protocols */ -extern struct proto rawv6_prot; -extern struct proto udpv6_prot; -extern struct proto udplitev6_prot; -extern struct proto tcpv6_prot; -extern struct proto pingv6_prot; +extern struct proto rawv6_prot; +extern struct proto udpv6_prot; +extern struct proto udplitev6_prot; +extern struct proto tcpv6_prot; +extern struct proto pingv6_prot; struct flowi6; /* extension headers */ -extern int ipv6_exthdrs_init(void); -extern void ipv6_exthdrs_exit(void); -extern int ipv6_frag_init(void); -extern void ipv6_frag_exit(void); +extern int ipv6_exthdrs_init(void); +extern void ipv6_exthdrs_exit(void); +extern int ipv6_frag_init(void); +extern void ipv6_frag_exit(void); /* transport protocols */ -extern int pingv6_init(void); -extern void pingv6_exit(void); -extern int rawv6_init(void); -extern void rawv6_exit(void); -extern int udpv6_init(void); -extern void udpv6_exit(void); -extern int udplitev6_init(void); -extern void udplitev6_exit(void); -extern int tcpv6_init(void); -extern void tcpv6_exit(void); - -extern int udpv6_connect(struct sock *sk, - struct sockaddr *uaddr, - int addr_len); - -extern int ip6_datagram_recv_ctl(struct sock *sk, - struct msghdr *msg, - struct sk_buff *skb); - -extern int ip6_datagram_send_ctl(struct net *net, - struct sock *sk, - struct msghdr *msg, - struct flowi6 *fl6, - struct ipv6_txoptions *opt, - int *hlimit, int *tclass, - int *dontfrag); +extern int pingv6_init(void); +extern void pingv6_exit(void); +extern int rawv6_init(void); +extern void rawv6_exit(void); +extern int udpv6_init(void); +extern void udpv6_exit(void); +extern int udplitev6_init(void); +extern void udplitev6_exit(void); +extern int tcpv6_init(void); +extern void tcpv6_exit(void); + +extern int udpv6_connect(struct sock *sk, + struct sockaddr *uaddr, + int addr_len); + +extern int ip6_datagram_recv_ctl(struct sock *sk, + struct msghdr *msg, + struct sk_buff *skb); + +extern int ip6_datagram_send_ctl(struct net *net, + struct sock *sk, + struct msghdr *msg, + struct flowi6 *fl6, + struct ipv6_txoptions *opt, + int *hlimit, int *tclass, + int *dontfrag); #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) -- cgit v1.2.3 From 17ef66afc0bdbbdc5c526db5e24bdd2dc3df1205 Mon Sep 17 00:00:00 2001 From: Lorenzo Colitti Date: Fri, 31 May 2013 15:05:48 +0000 Subject: net: ipv6: Unify {raw,udp}6_sock_seq_show. udp6_sock_seq_show and raw6_sock_seq_show are identical, except the UDP version displays ports and the raw version displays the protocol. Refactor most of the code in these two functions into a new common ip6_dgram_sock_seq_show function, in preparation for using it to display ICMPv6 sockets as well. Also reduce the indentation in parts of include/net/transp_v6.h to improve readability. Compiles and displays reasonable results with CONFIG_IPV6={n,m,y} Signed-off-by: Lorenzo Colitti Signed-off-by: David S. 
Miller --- include/net/transp_v6.h | 13 +++++++++++++ net/ipv6/datagram.c | 27 +++++++++++++++++++++++++++ net/ipv6/raw.c | 45 ++++++++------------------------------------- net/ipv6/udp.c | 49 +++++++++---------------------------------------- 4 files changed, 57 insertions(+), 77 deletions(-) (limited to 'include') diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 2d9d0e3115b9..6dddc08f27c9 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -49,6 +49,12 @@ extern int ip6_datagram_send_ctl(struct net *net, int *hlimit, int *tclass, int *dontfrag); +extern void ip6_dgram_sock_seq_show(struct seq_file *seq, + struct sock *sp, + __u16 srcp, + __u16 destp, + int bucket); + #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) /* @@ -58,4 +64,11 @@ extern const struct inet_connection_sock_af_ops ipv4_specific; extern void inet6_destroy_sock(struct sock *sk); +#define IPV6_SEQ_DGRAM_HEADER \ + " sl " \ + "local_address " \ + "remote_address " \ + "st tx_queue rx_queue tr tm->when retrnsmt" \ + " uid timeout inode ref pointer drops\n" + #endif diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 4b56cbbc7890..197e6f4a2b74 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -879,3 +879,30 @@ exit_f: return err; } EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl); + +void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, + __u16 srcp, __u16 destp, int bucket) +{ + struct ipv6_pinfo *np = inet6_sk(sp); + const struct in6_addr *dest, *src; + + dest = &np->daddr; + src = &np->rcv_saddr; + seq_printf(seq, + "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", + bucket, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp, + sp->sk_state, + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), + 0, 0L, 0, + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), + 0, + sock_i_ino(sp), + atomic_read(&sp->sk_refcnt), sp, + atomic_read(&sp->sk_drops)); +} diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 4f8886aa8429..c45f7a5c36e9 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1227,45 +1227,16 @@ struct proto rawv6_prot = { }; #ifdef CONFIG_PROC_FS -static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) -{ - struct ipv6_pinfo *np = inet6_sk(sp); - const struct in6_addr *dest, *src; - __u16 destp, srcp; - - dest = &np->daddr; - src = &np->rcv_saddr; - destp = 0; - srcp = inet_sk(sp)->inet_num; - seq_printf(seq, - "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", - i, - src->s6_addr32[0], src->s6_addr32[1], - src->s6_addr32[2], src->s6_addr32[3], srcp, - dest->s6_addr32[0], dest->s6_addr32[1], - dest->s6_addr32[2], dest->s6_addr32[3], destp, - sp->sk_state, - sk_wmem_alloc_get(sp), - sk_rmem_alloc_get(sp), - 0, 0L, 0, - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), - 0, - sock_i_ino(sp), - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); -} - static int raw6_seq_show(struct seq_file *seq, void *v) { - if (v == SEQ_START_TOKEN) - seq_printf(seq, - " sl " - "local_address " - "remote_address " - "st tx_queue rx_queue tr tm->when retrnsmt" - " uid timeout inode ref pointer drops\n"); - else - raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); + if (v == SEQ_START_TOKEN) { + seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); + } else { + struct sock *sp = v; + __u16 srcp = 
inet_sk(sp)->inet_num; + ip6_dgram_sock_seq_show(seq, v, srcp, 0, + raw_seq_private(seq)->bucket); + } return 0; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 42923b14dfa6..b5808539cd5c 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1359,48 +1359,17 @@ static const struct inet6_protocol udpv6_protocol = { /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS - -static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket) -{ - struct inet_sock *inet = inet_sk(sp); - struct ipv6_pinfo *np = inet6_sk(sp); - const struct in6_addr *dest, *src; - __u16 destp, srcp; - - dest = &np->daddr; - src = &np->rcv_saddr; - destp = ntohs(inet->inet_dport); - srcp = ntohs(inet->inet_sport); - seq_printf(seq, - "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", - bucket, - src->s6_addr32[0], src->s6_addr32[1], - src->s6_addr32[2], src->s6_addr32[3], srcp, - dest->s6_addr32[0], dest->s6_addr32[1], - dest->s6_addr32[2], dest->s6_addr32[3], destp, - sp->sk_state, - sk_wmem_alloc_get(sp), - sk_rmem_alloc_get(sp), - 0, 0L, 0, - from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), - 0, - sock_i_ino(sp), - atomic_read(&sp->sk_refcnt), sp, - atomic_read(&sp->sk_drops)); -} - int udp6_seq_show(struct seq_file *seq, void *v) { - if (v == SEQ_START_TOKEN) - seq_printf(seq, - " sl " - "local_address " - "remote_address " - "st tx_queue rx_queue tr tm->when retrnsmt" - " uid timeout inode ref pointer drops\n"); - else - udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket); + if (v == SEQ_START_TOKEN) { + seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); + } else { + int bucket = ((struct udp_iter_state *)seq->private)->bucket; + struct inet_sock *inet = inet_sk(v); + __u16 srcp = ntohs(inet->inet_sport); + __u16 destp = ntohs(inet->inet_dport); + ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); + } return 0; } -- cgit v1.2.3 From 8cc785f6f429c2a3fb81745dc142cbd72a462c4a Mon Sep 17 00:00:00 2001 From: Lorenzo Colitti Date: Fri, 31 May 2013 15:05:49 +0000 Subject: net: ipv4: make the ping /proc code AF-independent Introduce a ping_seq_afinfo structure (similar to its UDP equivalent) and use it to make some of the ping /proc functions address-family independent. Rename the remaining ping /proc functions from ping_* to ping_v4_*. Compiles and displays reasonable results with CONFIG_IPV6={n,m,y} Signed-off-by: Lorenzo Colitti Signed-off-by: David S. 
Miller --- include/net/ping.h | 8 ++++++ net/ipv4/ping.c | 73 +++++++++++++++++++++++++++++++++++------------------- 2 files changed, 55 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/net/ping.h b/include/net/ping.h index 9242fa090d3d..b9282f019804 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -49,6 +49,7 @@ struct ping_table { struct ping_iter_state { struct seq_net_private p; int bucket; + sa_family_t family; }; extern struct proto ping_prot; @@ -87,6 +88,13 @@ int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); void ping_rcv(struct sk_buff *skb); #ifdef CONFIG_PROC_FS +struct ping_seq_afinfo { + char *name; + sa_family_t family; + const struct file_operations *seq_fops; + const struct seq_operations seq_ops; +}; + extern int __init ping_proc_init(void); extern void ping_proc_exit(void); #endif diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 71f6ad02fa67..8c2da9b8cd86 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -1001,7 +1001,8 @@ static struct sock *ping_get_first(struct seq_file *seq, int start) continue; sk_nulls_for_each(sk, node, hslot) { - if (net_eq(sock_net(sk), net)) + if (net_eq(sock_net(sk), net) && + sk->sk_family == state->family) goto found; } } @@ -1034,16 +1035,23 @@ static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos) return pos ? NULL : sk; } -static void *ping_seq_start(struct seq_file *seq, loff_t *pos) +static void *ping_seq_start(struct seq_file *seq, loff_t *pos, + sa_family_t family) { struct ping_iter_state *state = seq->private; state->bucket = 0; + state->family = family; read_lock_bh(&ping_table.lock); return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN; } +static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos) +{ + return ping_seq_start(seq, pos, AF_INET); +} + static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; @@ -1062,7 +1070,7 @@ static void ping_seq_stop(struct seq_file *seq, void *v) read_unlock_bh(&ping_table.lock); } -static void ping_format_sock(struct sock *sp, struct seq_file *f, +static void ping_v4_format_sock(struct sock *sp, struct seq_file *f, int bucket, int *len) { struct inet_sock *inet = inet_sk(sp); @@ -1083,7 +1091,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f, atomic_read(&sp->sk_drops), len); } -static int ping_seq_show(struct seq_file *seq, void *v) +static int ping_v4_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-127s\n", @@ -1094,22 +1102,23 @@ static int ping_seq_show(struct seq_file *seq, void *v) struct ping_iter_state *state = seq->private; int len; - ping_format_sock(v, seq, state->bucket, &len); + ping_v4_format_sock(v, seq, state->bucket, &len); seq_printf(seq, "%*s\n", 127 - len, ""); } return 0; } -static const struct seq_operations ping_seq_ops = { - .show = ping_seq_show, - .start = ping_seq_start, +static const struct seq_operations ping_v4_seq_ops = { + .show = ping_v4_seq_show, + .start = ping_v4_seq_start, .next = ping_seq_next, .stop = ping_seq_stop, }; static int ping_seq_open(struct inode *inode, struct file *file) { - return seq_open_net(inode, file, &ping_seq_ops, + struct ping_seq_afinfo *afinfo = PDE_DATA(inode); + return seq_open_net(inode, file, &afinfo->seq_ops, sizeof(struct ping_iter_state)); } @@ -1120,46 +1129,58 @@ static const struct file_operations ping_seq_fops = { .release = seq_release_net, }; -static int ping_proc_register(struct net *net) +static struct ping_seq_afinfo ping_v4_seq_afinfo = { + .name 
= "icmp", + .family = AF_INET, + .seq_fops = &ping_seq_fops, + .seq_ops = { + .start = ping_v4_seq_start, + .show = ping_v4_seq_show, + .next = ping_seq_next, + .stop = ping_seq_stop, + }, +}; + +static int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo) { struct proc_dir_entry *p; - int rc = 0; - - p = proc_create("icmp", S_IRUGO, net->proc_net, &ping_seq_fops); + p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, + afinfo->seq_fops, afinfo); if (!p) - rc = -ENOMEM; - return rc; + return -ENOMEM; + return 0; } -static void ping_proc_unregister(struct net *net) +static void ping_proc_unregister(struct net *net, + struct ping_seq_afinfo *afinfo) { - remove_proc_entry("icmp", net->proc_net); + remove_proc_entry(afinfo->name, net->proc_net); } -static int __net_init ping_proc_init_net(struct net *net) +static int __net_init ping_v4_proc_init_net(struct net *net) { - return ping_proc_register(net); + return ping_proc_register(net, &ping_v4_seq_afinfo); } -static void __net_exit ping_proc_exit_net(struct net *net) +static void __net_exit ping_v4_proc_exit_net(struct net *net) { - ping_proc_unregister(net); + ping_proc_unregister(net, &ping_v4_seq_afinfo); } -static struct pernet_operations ping_net_ops = { - .init = ping_proc_init_net, - .exit = ping_proc_exit_net, +static struct pernet_operations ping_v4_net_ops = { + .init = ping_v4_proc_init_net, + .exit = ping_v4_proc_exit_net, }; int __init ping_proc_init(void) { - return register_pernet_subsys(&ping_net_ops); + return register_pernet_subsys(&ping_v4_net_ops); } void ping_proc_exit(void) { - unregister_pernet_subsys(&ping_net_ops); + unregister_pernet_subsys(&ping_v4_net_ops); } #endif -- cgit v1.2.3 From d862e546142328d18377a4704be97f2ae301847a Mon Sep 17 00:00:00 2001 From: Lorenzo Colitti Date: Fri, 31 May 2013 15:05:50 +0000 Subject: net: ipv6: Implement /proc/net/icmp6. The format is based on /proc/net/icmp and /proc/net/{udp,raw}6. Compiles and displays reasonable results with CONFIG_IPV6={n,m,y} Couldn't figure out how to test without CONFIG_PROC_FS enabled. Signed-off-by: Lorenzo Colitti Signed-off-by: David S. Miller --- include/net/ping.h | 8 +++++ net/ipv4/ping.c | 21 ++++++----- net/ipv6/ping.c | 102 +++++++++++++++++++++++++++++++++++++++++------------ 3 files changed, 99 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/net/ping.h b/include/net/ping.h index b9282f019804..db04802f1673 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -95,6 +95,14 @@ struct ping_seq_afinfo { const struct seq_operations seq_ops; }; +extern const struct file_operations ping_seq_fops; + +void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family); +void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos); +void ping_seq_stop(struct seq_file *seq, void *v); +int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo); +void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo); + extern int __init ping_proc_init(void); extern void ping_proc_exit(void); #endif diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 8c2da9b8cd86..3552a45a6f86 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -1035,8 +1035,7 @@ static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos) return pos ? 
NULL : sk; } -static void *ping_seq_start(struct seq_file *seq, loff_t *pos, - sa_family_t family) +void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family) { struct ping_iter_state *state = seq->private; state->bucket = 0; @@ -1046,13 +1045,14 @@ static void *ping_seq_start(struct seq_file *seq, loff_t *pos, return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN; } +EXPORT_SYMBOL_GPL(ping_seq_start); static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos) { return ping_seq_start(seq, pos, AF_INET); } -static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos) +void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; @@ -1064,11 +1064,13 @@ static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++*pos; return sk; } +EXPORT_SYMBOL_GPL(ping_seq_next); -static void ping_seq_stop(struct seq_file *seq, void *v) +void ping_seq_stop(struct seq_file *seq, void *v) { read_unlock_bh(&ping_table.lock); } +EXPORT_SYMBOL_GPL(ping_seq_stop); static void ping_v4_format_sock(struct sock *sp, struct seq_file *f, int bucket, int *len) @@ -1122,12 +1124,13 @@ static int ping_seq_open(struct inode *inode, struct file *file) sizeof(struct ping_iter_state)); } -static const struct file_operations ping_seq_fops = { +const struct file_operations ping_seq_fops = { .open = ping_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; +EXPORT_SYMBOL_GPL(ping_seq_fops); static struct ping_seq_afinfo ping_v4_seq_afinfo = { .name = "icmp", @@ -1141,7 +1144,7 @@ static struct ping_seq_afinfo ping_v4_seq_afinfo = { }, }; -static int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo) +int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo) { struct proc_dir_entry *p; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, @@ -1150,13 +1153,13 @@ static int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo) return -ENOMEM; return 0; } +EXPORT_SYMBOL_GPL(ping_proc_register); -static void ping_proc_unregister(struct net *net, - struct ping_seq_afinfo *afinfo) +void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo) { remove_proc_entry(afinfo->name, net->proc_net); } - +EXPORT_SYMBOL_GPL(ping_proc_unregister); static int __net_init ping_v4_proc_init_net(struct net *net) { diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index a6462d657c15..62ac5f2e0aaf 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -78,29 +78,6 @@ int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, return 0; } -int __init pingv6_init(void) -{ - pingv6_ops.ipv6_recv_error = ipv6_recv_error; - pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl; - pingv6_ops.icmpv6_err_convert = icmpv6_err_convert; - pingv6_ops.ipv6_icmp_error = ipv6_icmp_error; - pingv6_ops.ipv6_chk_addr = ipv6_chk_addr; - return inet6_register_protosw(&pingv6_protosw); -} - -/* This never gets called because it's not possible to unload the ipv6 module, - * but just in case. 
- */ -void pingv6_exit(void) -{ - pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error; - pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl; - pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert; - pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error; - pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr; - inet6_unregister_protosw(&pingv6_protosw); -} - int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { @@ -214,3 +191,82 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, return err; } + +#ifdef CONFIG_PROC_FS +static void *ping_v6_seq_start(struct seq_file *seq, loff_t *pos) +{ + return ping_seq_start(seq, pos, AF_INET6); +} + +int ping_v6_seq_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); + } else { + int bucket = ((struct ping_iter_state *) seq->private)->bucket; + struct inet_sock *inet = inet_sk(v); + __u16 srcp = ntohs(inet->inet_sport); + __u16 destp = ntohs(inet->inet_dport); + ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); + } + return 0; +} + +static struct ping_seq_afinfo ping_v6_seq_afinfo = { + .name = "icmp6", + .family = AF_INET6, + .seq_fops = &ping_seq_fops, + .seq_ops = { + .start = ping_v6_seq_start, + .show = ping_v6_seq_show, + .next = ping_seq_next, + .stop = ping_seq_stop, + }, +}; + +static int __net_init ping_v6_proc_init_net(struct net *net) +{ + return ping_proc_register(net, &ping_v6_seq_afinfo); +} + +static void __net_init ping_v6_proc_exit_net(struct net *net) +{ + return ping_proc_unregister(net, &ping_v6_seq_afinfo); +} + +static struct pernet_operations ping_v6_net_ops = { + .init = ping_v6_proc_init_net, + .exit = ping_v6_proc_exit_net, +}; +#endif + +int __init pingv6_init(void) +{ +#ifdef CONFIG_PROC_FS + int ret = register_pernet_subsys(&ping_v6_net_ops); + if (ret) + return ret; +#endif + pingv6_ops.ipv6_recv_error = ipv6_recv_error; + pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl; + pingv6_ops.icmpv6_err_convert = icmpv6_err_convert; + pingv6_ops.ipv6_icmp_error = ipv6_icmp_error; + pingv6_ops.ipv6_chk_addr = ipv6_chk_addr; + return inet6_register_protosw(&pingv6_protosw); +} + +/* This never gets called because it's not possible to unload the ipv6 module, + * but just in case. + */ +void pingv6_exit(void) +{ + pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error; + pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl; + pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert; + pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error; + pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr; +#ifdef CONFIG_PROC_FS + unregister_pernet_subsys(&ping_v6_net_ops); +#endif + inet6_unregister_protosw(&pingv6_protosw); +} -- cgit v1.2.3 From 0e9649c143eb9d7ecaaef221fd411e5c747f97ce Mon Sep 17 00:00:00 2001 From: Jean Sacren Date: Sat, 1 Jun 2013 16:23:16 +0000 Subject: net: do not manually initialize enumerators Clean up unnecessary initialization of enumerators as the compiler takes care of that. Signed-off-by: Jean Sacren Signed-off-by: David S. 
Miller --- include/linux/net.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/net.h b/include/linux/net.h index 99c9f0c103c2..4f27575ce1d6 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -79,9 +79,9 @@ enum sock_type { #endif /* ARCH_HAS_SOCKET_TYPES */ enum sock_shutdown_cmd { - SHUT_RD = 0, - SHUT_WR = 1, - SHUT_RDWR = 2, + SHUT_RD, + SHUT_WR, + SHUT_RDWR, }; struct socket_wq { -- cgit v1.2.3 From f145b67f24e6855a026fbe9c9f1df97412044d1d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 2 Jun 2013 07:04:26 +0000 Subject: transp_v6.h: style neatening Use a more current code style. Remove extern from function prototypes. Align function arguments and reflow to 80 cols. Use network comment styles. Signed-off-by: Joe Perches cc: Lorenzo Colitti , Signed-off-by: David S. Miller --- include/net/transp_v6.h | 89 ++++++++++++++++++++----------------------------- 1 file changed, 37 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 6dddc08f27c9..48660e50ae90 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -3,72 +3,57 @@ #include -/* - * IPv6 transport protocols - */ - -extern struct proto rawv6_prot; -extern struct proto udpv6_prot; -extern struct proto udplitev6_prot; -extern struct proto tcpv6_prot; -extern struct proto pingv6_prot; +/* IPv6 transport protocols */ +extern struct proto rawv6_prot; +extern struct proto udpv6_prot; +extern struct proto udplitev6_prot; +extern struct proto tcpv6_prot; +extern struct proto pingv6_prot; struct flowi6; /* extension headers */ -extern int ipv6_exthdrs_init(void); -extern void ipv6_exthdrs_exit(void); -extern int ipv6_frag_init(void); -extern void ipv6_frag_exit(void); +int ipv6_exthdrs_init(void); +void ipv6_exthdrs_exit(void); +int ipv6_frag_init(void); +void ipv6_frag_exit(void); /* transport protocols */ -extern int pingv6_init(void); -extern void pingv6_exit(void); -extern int rawv6_init(void); -extern void rawv6_exit(void); -extern int udpv6_init(void); -extern void udpv6_exit(void); -extern int udplitev6_init(void); -extern void udplitev6_exit(void); -extern int tcpv6_init(void); -extern void tcpv6_exit(void); +int pingv6_init(void); +void pingv6_exit(void); +int rawv6_init(void); +void rawv6_exit(void); +int udpv6_init(void); +void udpv6_exit(void); +int udplitev6_init(void); +void udplitev6_exit(void); +int tcpv6_init(void); +void tcpv6_exit(void); -extern int udpv6_connect(struct sock *sk, - struct sockaddr *uaddr, - int addr_len); +int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); -extern int ip6_datagram_recv_ctl(struct sock *sk, - struct msghdr *msg, - struct sk_buff *skb); +int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, + struct sk_buff *skb); -extern int ip6_datagram_send_ctl(struct net *net, - struct sock *sk, - struct msghdr *msg, - struct flowi6 *fl6, - struct ipv6_txoptions *opt, - int *hlimit, int *tclass, - int *dontfrag); +int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, + struct flowi6 *fl6, struct ipv6_txoptions *opt, + int *hlimit, int *tclass, int *dontfrag); -extern void ip6_dgram_sock_seq_show(struct seq_file *seq, - struct sock *sp, - __u16 srcp, - __u16 destp, - int bucket); +void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, + __u16 srcp, __u16 destp, int bucket); -#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) +#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) -/* - * 
address family specific functions - */ +/* address family specific functions */ extern const struct inet_connection_sock_af_ops ipv4_specific; -extern void inet6_destroy_sock(struct sock *sk); +void inet6_destroy_sock(struct sock *sk); -#define IPV6_SEQ_DGRAM_HEADER \ - " sl " \ - "local_address " \ - "remote_address " \ - "st tx_queue rx_queue tr tm->when retrnsmt" \ - " uid timeout inode ref pointer drops\n" +#define IPV6_SEQ_DGRAM_HEADER \ + " sl " \ + "local_address " \ + "remote_address " \ + "st tx_queue rx_queue tr tm->when retrnsmt" \ + " uid timeout inode ref pointer drops\n" #endif -- cgit v1.2.3 From 989c6505cdda587f87573bb6828f23964dd3d19b Mon Sep 17 00:00:00 2001 From: Alexander Bondar Date: Thu, 16 May 2013 17:34:17 +0300 Subject: mac80211: Use suitable semantics for beacon availability indication Currently beacon availability upon association is marked by have_beacon flag of assoc_data structure that becomes unavailable when association completes. However beacon availability indication is required also after association to inform a driver. Currently dtim_period parameter is used for this purpose. Move have_beacon flag to another structure, persistant throughout a interface's life cycle. Use suitable sematics for beacon availability indication. Signed-off-by: Alexander Bondar [fix another instance of BSS_CHANGED_DTIM_PERIOD in docs] Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 2 +- include/net/mac80211.h | 8 ++++---- net/mac80211/ieee80211_i.h | 3 ++- net/mac80211/mlme.c | 20 +++++++++++--------- net/mac80211/util.c | 5 +++-- 5 files changed, 21 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index b807ddac650c..c942eb0bbbeb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -785,7 +785,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update quotas\n"); } - } else if (changes & BSS_CHANGED_DTIM_PERIOD) { + } else if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon _after_ association so * remove the session protection. diff --git a/include/net/mac80211.h b/include/net/mac80211.h index cb37f82d8d09..a405a7a9775c 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -217,8 +217,8 @@ struct ieee80211_chanctx_conf { * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS) * changed (currently only in P2P client mode, GO mode will be later) - * @BSS_CHANGED_DTIM_PERIOD: the DTIM period value was changed (set when - * it becomes valid, managed mode only) + * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available: + * currently dtim_period only is under consideration. * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, * note that this is only called when it changes after the channel * context had been assigned. 
@@ -244,7 +244,7 @@ enum ieee80211_bss_change { BSS_CHANGED_PS = 1<<17, BSS_CHANGED_TXPOWER = 1<<18, BSS_CHANGED_P2P_PS = 1<<19, - BSS_CHANGED_DTIM_PERIOD = 1<<20, + BSS_CHANGED_BEACON_INFO = 1<<20, BSS_CHANGED_BANDWIDTH = 1<<21, /* when adding here, make sure to change ieee80211_reconfig */ @@ -288,7 +288,7 @@ enum ieee80211_rssi_event { * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag * @dtim_period: num of beacons before the next DTIM, for beaconing, * valid in station mode only if after the driver was notified - * with the %BSS_CHANGED_DTIM_PERIOD flag, will be non-zero then. + * with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then. * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old * as it may have been received during scanning long ago). If the * HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 9eed6f1d1614..7a6f1a0207ec 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -366,7 +366,7 @@ struct ieee80211_mgd_assoc_data { u8 ssid_len; u8 supp_rates_len; bool wmm, uapsd; - bool have_beacon, need_beacon; + bool need_beacon; bool synced; bool timeout_started; @@ -404,6 +404,7 @@ struct ieee80211_if_managed { bool powersave; /* powersave requested for this iface */ bool broken_ap; /* AP is broken -- turn off powersave */ + bool have_beacon; u8 dtim_period; enum ieee80211_smps_mode req_smps, /* requested smps mode */ driver_smps_mode; /* smps mode request */ diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index aa5cd2e138be..ad9bb9e10cbb 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1360,7 +1360,7 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata) IEEE80211_STA_CONNECTION_POLL)) return false; - if (!sdata->vif.bss_conf.dtim_period) + if (!mgd->have_beacon) return false; rcu_read_lock(); @@ -1771,7 +1771,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, ieee80211_led_assoc(local, 1); - if (sdata->u.mgd.assoc_data->have_beacon) { + if (sdata->u.mgd.have_beacon) { /* * If the AP is buggy we may get here with no DTIM period * known, so assume it's 1 which is the only safe assumption @@ -1779,7 +1779,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, * probably just won't work at all. */ bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1; - bss_info_changed |= BSS_CHANGED_DTIM_PERIOD; + bss_info_changed |= BSS_CHANGED_BEACON_INFO; } else { bss_conf->dtim_period = 0; } @@ -1903,6 +1903,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, del_timer_sync(&sdata->u.mgd.chswitch_timer); sdata->vif.bss_conf.dtim_period = 0; + ifmgd->have_beacon = false; ifmgd->flags = 0; ieee80211_vif_release_channel(sdata); @@ -2877,7 +2878,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, const struct ieee80211_tim_ie *tim_ie = elems.tim; ifmgd->dtim_period = tim_ie->dtim_period; } - ifmgd->assoc_data->have_beacon = true; + ifmgd->have_beacon = true; ifmgd->assoc_data->need_beacon = false; if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) { sdata->vif.bss_conf.sync_tsf = @@ -3059,7 +3060,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, * If we haven't had a beacon before, tell the driver about the * DTIM period (and beacon timing if desired) now. 
*/ - if (!bss_conf->dtim_period) { + if (!ifmgd->have_beacon) { /* a few bogus AP send dtim_period = 0 or no TIM IE */ if (elems.tim) bss_conf->dtim_period = elems.tim->dtim_period ?: 1; @@ -3078,7 +3079,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.sync_dtim_count = 0; } - changed |= BSS_CHANGED_DTIM_PERIOD; + changed |= BSS_CHANGED_BEACON_INFO; + ifmgd->have_beacon = true; mutex_lock(&local->iflist_mtx); ieee80211_recalc_ps(local, -1); @@ -3424,8 +3426,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && time_after(jiffies, ifmgd->assoc_data->timeout)) { - if ((ifmgd->assoc_data->need_beacon && - !ifmgd->assoc_data->have_beacon) || + if ((ifmgd->assoc_data->need_beacon && !ifmgd->have_beacon) || ieee80211_do_assoc(sdata)) { u8 bssid[ETH_ALEN]; @@ -4193,6 +4194,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ifmgd->assoc_data = assoc_data; ifmgd->dtim_period = 0; + ifmgd->have_beacon = false; err = ieee80211_prep_connection(sdata, req->bss, true); if (err) @@ -4224,7 +4226,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ifmgd->dtim_period = tim->dtim_period; dtim_count = tim->dtim_count; } - assoc_data->have_beacon = true; + ifmgd->have_beacon = true; assoc_data->timeout = jiffies; assoc_data->timeout_started = true; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 89a83770d152..5a6c1351d1d3 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1584,8 +1584,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) BSS_CHANGED_ARP_FILTER | BSS_CHANGED_PS; - if (sdata->u.mgd.dtim_period) - changed |= BSS_CHANGED_DTIM_PERIOD; + /* Re-send beacon info report to the driver */ + if (sdata->u.mgd.have_beacon) + changed |= BSS_CHANGED_BEACON_INFO; sdata_lock(sdata); ieee80211_bss_info_change_notify(sdata, changed); -- cgit v1.2.3 From 780b40df12cf0161d8ccc5381940e04584793933 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 5 Jun 2013 09:32:50 +0200 Subject: wireless: fix kernel-doc Some kernel-doc fixes for forgotten fields and renamed things. 
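For readers less familiar with the convention these fixes touch: scripts/kernel-doc expects every struct member and function parameter to carry a matching "@name:" line in the comment block above it, and warns about anything forgotten or renamed. A minimal illustrative stub (the struct and its fields below are made up for demonstration, not part of this patch) might look like:

/**
 * struct foo_config - example configuration state (hypothetical)
 * @enabled: nonzero if the feature is active
 * @timeout_ms: watchdog timeout in milliseconds
 *
 * Each member needs a matching @name: line above; a member added or
 * renamed without updating the comment is exactly what scripts/kernel-doc
 * flags and what cleanups like this one fix.
 */
struct foo_config {
	unsigned int enabled;
	unsigned int timeout_ms;
};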
Signed-off-by: Johannes Berg --- Documentation/DocBook/80211.tmpl | 11 +++++------ include/net/cfg80211.h | 11 ++++++++++- net/mac80211/sta_info.h | 3 +++ 3 files changed, 18 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl index ebe89694cf81..49267ea97568 100644 --- a/Documentation/DocBook/80211.tmpl +++ b/Documentation/DocBook/80211.tmpl @@ -127,12 +127,11 @@ !Finclude/net/cfg80211.h cfg80211_ibss_params !Finclude/net/cfg80211.h cfg80211_connect_params !Finclude/net/cfg80211.h cfg80211_pmksa -!Finclude/net/cfg80211.h cfg80211_send_rx_auth -!Finclude/net/cfg80211.h cfg80211_send_auth_timeout -!Finclude/net/cfg80211.h cfg80211_send_rx_assoc -!Finclude/net/cfg80211.h cfg80211_send_assoc_timeout -!Finclude/net/cfg80211.h cfg80211_send_deauth -!Finclude/net/cfg80211.h cfg80211_send_disassoc +!Finclude/net/cfg80211.h cfg80211_rx_mlme_mgmt +!Finclude/net/cfg80211.h cfg80211_auth_timeout +!Finclude/net/cfg80211.h cfg80211_rx_assoc_resp +!Finclude/net/cfg80211.h cfg80211_assoc_timeout +!Finclude/net/cfg80211.h cfg80211_tx_mlme_mgmt !Finclude/net/cfg80211.h cfg80211_ibss_joined !Finclude/net/cfg80211.h cfg80211_connect_result !Finclude/net/cfg80211.h cfg80211_roamed diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5b208064f1bd..855c76ccff38 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2853,7 +2853,7 @@ struct cfg80211_cached_keys; * @current_bss: (private) Used by the internal configuration code * @channel: (private) Used by the internal configuration code to track * the user-set AP, monitor and WDS channel - * @preset_chan: (private) Used by the internal configuration code to + * @preset_chandef: (private) Used by the internal configuration code to * track the channel to be used for AP later * @bssid: (private) Used by the internal configuration code * @ssid: (private) Used by the internal configuration code @@ -2875,6 +2875,15 @@ struct cfg80211_cached_keys; * @p2p_started: true if this is a P2P Device that has been started * @cac_started: true if DFS channel availability check has been started * @cac_start_time: timestamp (jiffies) when the dfs state was entered. 
+ * @ps: powersave mode is enabled + * @ps_timeout: dynamic powersave timeout + * @ap_unexpected_nlportid: (private) netlink port ID of application + * registered for unexpected class 3 frames (AP mode) + * @conn: (private) cfg80211 software SME connection state machine data + * @connect_keys: (private) keys to set after connection is established + * @ibss_fixed: (private) IBSS is using fixed BSSID + * @event_list: (private) list for internal event processing + * @event_lock: (private) lock for event list */ struct wireless_dev { struct wiphy *wiphy; diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 41c28b977f7c..bd12fc54266c 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -297,6 +297,9 @@ struct sta_ampdu_mlme { * @rcu_head: RCU head used for freeing this station struct * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, * taken from HT/VHT capabilities or VHT operating mode notification + * @chains: chains ever used for RX from this station + * @chain_signal_last: last signal (per chain) + * @chain_signal_avg: signal average (per chain) */ struct sta_info { /* General information, mostly static */ -- cgit v1.2.3 From 4c4d41f200db375b2d2cc6d0a1de0606c8266398 Mon Sep 17 00:00:00 2001 From: Fan Du Date: Thu, 6 Jun 2013 10:15:54 +0800 Subject: xfrm: add LINUX_MIB_XFRMACQUIREERROR statistic counter When a host pings its peer, the ICMP echo request triggers an IPsec policy, and the host negotiates SAs with its peer. After IKE has installed the SA for the OUT direction, but before the SA for the IN direction is installed, the host may receive the ICMP echo reply from its peer. At that point the SA state for the IN direction can still be XFRM_STATE_ACQ, so the received packet is dropped and accounted under LINUX_MIB_XFRMINSTATEINVALID. Adding a dedicated LINUX_MIB_XFRMACQUIREERROR statistic counter for this scenario, where the SA is still in the larval state, is much clearer for the user than LINUX_MIB_XFRMINSTATEINVALID, which suggests the SA is totally bad.
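Once applied, the new counter appears next to the existing XFRM MIB entries. The following is a minimal, illustrative userspace sketch (not part of the patch); it assumes the kernel was built with CONFIG_XFRM_STATISTICS so the counters are exported through /proc/net/xfrm_stat:

#include <stdio.h>
#include <string.h>

/* Return the XfrmAcquireError count from /proc/net/xfrm_stat, or -1 if the
 * file cannot be opened; returns 0 when the counter is absent or zero.
 */
static long read_xfrm_acquire_error(void)
{
	char name[64];
	long value;
	FILE *f = fopen("/proc/net/xfrm_stat", "r");

	if (!f)
		return -1;
	while (fscanf(f, "%63s %ld", name, &value) == 2) {
		if (!strcmp(name, "XfrmAcquireError")) {
			fclose(f);
			return value;
		}
	}
	fclose(f);
	return 0;
}

int main(void)
{
	printf("XfrmAcquireError: %ld\n", read_xfrm_acquire_error());
	return 0;
}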
Signed-off-by: Fan Du Signed-off-by: Steffen Klassert --- include/uapi/linux/snmp.h | 1 + net/xfrm/xfrm_input.c | 5 +++++ net/xfrm/xfrm_proc.c | 1 + 3 files changed, 7 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index df2e8b4f9c03..ea17542ff321 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -287,6 +287,7 @@ enum LINUX_MIB_XFRMOUTPOLERROR, /* XfrmOutPolError */ LINUX_MIB_XFRMFWDHDRERROR, /* XfrmFwdHdrError*/ LINUX_MIB_XFRMOUTSTATEINVALID, /* XfrmOutStateInvalid */ + LINUX_MIB_XFRMACQUIREERROR, /* XfrmAcquireError */ __LINUX_MIB_XFRMMAX }; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index ab2bb42fe094..88843996f935 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -163,6 +163,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) skb->sp->xvec[skb->sp->len++] = x; spin_lock(&x->lock); + if (unlikely(x->km.state == XFRM_STATE_ACQ)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); + goto drop_unlock; + } + if (unlikely(x->km.state != XFRM_STATE_VALID)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID); goto drop_unlock; diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c index c721b0d9ab8b..80cd1e55b834 100644 --- a/net/xfrm/xfrm_proc.c +++ b/net/xfrm/xfrm_proc.c @@ -44,6 +44,7 @@ static const struct snmp_mib xfrm_mib_list[] = { SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR), SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR), SNMP_MIB_ITEM("XfrmOutStateInvalid", LINUX_MIB_XFRMOUTSTATEINVALID), + SNMP_MIB_ITEM("XfrmAcquireError", LINUX_MIB_XFRMACQUIREERROR), SNMP_MIB_SENTINEL }; -- cgit v1.2.3 From 350e7805706e11fef0bda2ab4b5cd5a0881480b9 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 6 Jun 2013 14:08:31 +0000 Subject: net: vlan: minor: remove unused HAVE_VLAN_PUT_TAG Remove the definition of HAVE_VLAN_PUT_TAG since it's not used or exported anywhere. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/if_vlan.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 52bd03b38962..7a9c8cf31659 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -243,8 +243,6 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, return skb; } -#define HAVE_VLAN_PUT_TAG - /** * vlan_put_tag - inserts VLAN tag according to device features * @skb: skbuff to tag -- cgit v1.2.3 From 28850dc7c71da9d0c0e39246e9ff6913f41f8d0a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 7 Jun 2013 05:11:46 +0000 Subject: net: tcp: move GRO/GSO functions to tcp_offload Would be good to make things explicit and move those functions to a new file called tcp_offload.c, thus make this similar to tcpv6_offload.c. While moving all related functions into tcp_offload.c, we can also make some of them static, since they are only used there. Also, add an explicit registration function. Suggested-by: Eric Dumazet Signed-off-by: Daniel Borkmann Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/tcp.h | 9 +- net/ipv4/Makefile | 2 +- net/ipv4/af_inet.c | 13 +- net/ipv4/tcp.c | 241 ----------------------------------- net/ipv4/tcp_ipv4.c | 66 +--------- net/ipv4/tcp_offload.c | 332 +++++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 341 insertions(+), 322 deletions(-) create mode 100644 net/ipv4/tcp_offload.c (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index bf1cc3dced5e..0d637e9403a5 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1541,15 +1541,14 @@ extern struct request_sock_ops tcp6_request_sock_ops; extern void tcp_v4_destroy_sock(struct sock *sk); -extern int tcp_v4_gso_send_check(struct sk_buff *skb); extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, netdev_features_t features); extern struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); -extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head, - struct sk_buff *skb); extern int tcp_gro_complete(struct sk_buff *skb); -extern int tcp4_gro_complete(struct sk_buff *skb); + +extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, + __be32 daddr); #ifdef CONFIG_PROC_FS extern int tcp4_proc_init(void); @@ -1584,6 +1583,8 @@ struct tcp_request_sock_ops { #endif }; +extern int tcpv4_offload_init(void); + extern void tcp_v4_init(void); extern void tcp_init(void); diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 089cb9f36387..4d3e138c564f 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -8,7 +8,7 @@ obj-y := route.o inetpeer.o protocol.o \ inet_timewait_sock.o inet_connection_sock.o \ tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \ - datagram.o raw.o udp.o udplite.o \ + tcp_offload.o datagram.o raw.o udp.o udplite.o \ arp.o icmp.o devinet.o af_inet.o igmp.o \ fib_frontend.o fib_semantics.o fib_trie.o \ inet_fragment.o ping.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 9c090c7daea1..7b514290efc6 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1559,15 +1559,6 @@ static const struct net_protocol tcp_protocol = { .netns_ok = 1, }; -static const struct net_offload tcp_offload = { - .callbacks = { - .gso_send_check = tcp_v4_gso_send_check, - .gso_segment = tcp_tso_segment, - .gro_receive = tcp4_gro_receive, - .gro_complete = tcp4_gro_complete, - }, -}; - static const struct net_protocol udp_protocol = { .handler = udp_rcv, .err_handler = udp_err, @@ -1683,8 +1674,8 @@ static int __init ipv4_offload_init(void) */ if (inet_add_offload(&udp_offload, IPPROTO_UDP) < 0) pr_crit("%s: Cannot add UDP protocol offload\n", __func__); - if (inet_add_offload(&tcp_offload, IPPROTO_TCP) < 0) - pr_crit("%s: Cannot add TCP protocol offlaod\n", __func__); + if (tcpv4_offload_init() < 0) + pr_crit("%s: Cannot add TCP protocol offload\n", __func__); dev_add_offload(&ip_packet_offload); return 0; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 6a1cf95abc98..bc4246940f6c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2877,247 +2877,6 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname, EXPORT_SYMBOL(compat_tcp_getsockopt); #endif -struct sk_buff *tcp_tso_segment(struct sk_buff *skb, - netdev_features_t features) -{ - struct sk_buff *segs = ERR_PTR(-EINVAL); - struct tcphdr *th; - unsigned int thlen; - unsigned int seq; - __be32 delta; - unsigned int oldlen; - unsigned int mss; - struct sk_buff *gso_skb = skb; - __sum16 newcheck; - bool ooo_okay, copy_destructor; - - if (!pskb_may_pull(skb, 
sizeof(*th))) - goto out; - - th = tcp_hdr(skb); - thlen = th->doff * 4; - if (thlen < sizeof(*th)) - goto out; - - if (!pskb_may_pull(skb, thlen)) - goto out; - - oldlen = (u16)~skb->len; - __skb_pull(skb, thlen); - - mss = tcp_skb_mss(skb); - if (unlikely(skb->len <= mss)) - goto out; - - if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { - /* Packet is from an untrusted source, reset gso_segs. */ - int type = skb_shinfo(skb)->gso_type; - - if (unlikely(type & - ~(SKB_GSO_TCPV4 | - SKB_GSO_DODGY | - SKB_GSO_TCP_ECN | - SKB_GSO_TCPV6 | - SKB_GSO_GRE | - SKB_GSO_MPLS | - SKB_GSO_UDP_TUNNEL | - 0) || - !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) - goto out; - - skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); - - segs = NULL; - goto out; - } - - copy_destructor = gso_skb->destructor == tcp_wfree; - ooo_okay = gso_skb->ooo_okay; - /* All segments but the first should have ooo_okay cleared */ - skb->ooo_okay = 0; - - segs = skb_segment(skb, features); - if (IS_ERR(segs)) - goto out; - - /* Only first segment might have ooo_okay set */ - segs->ooo_okay = ooo_okay; - - delta = htonl(oldlen + (thlen + mss)); - - skb = segs; - th = tcp_hdr(skb); - seq = ntohl(th->seq); - - newcheck = ~csum_fold((__force __wsum)((__force u32)th->check + - (__force u32)delta)); - - do { - th->fin = th->psh = 0; - th->check = newcheck; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - th->check = - csum_fold(csum_partial(skb_transport_header(skb), - thlen, skb->csum)); - - seq += mss; - if (copy_destructor) { - skb->destructor = gso_skb->destructor; - skb->sk = gso_skb->sk; - /* {tcp|sock}_wfree() use exact truesize accounting : - * sum(skb->truesize) MUST be exactly be gso_skb->truesize - * So we account mss bytes of 'true size' for each segment. - * The last segment will contain the remaining. 
- */ - skb->truesize = mss; - gso_skb->truesize -= mss; - } - skb = skb->next; - th = tcp_hdr(skb); - - th->seq = htonl(seq); - th->cwr = 0; - } while (skb->next); - - /* Following permits TCP Small Queues to work well with GSO : - * The callback to TCP stack will be called at the time last frag - * is freed at TX completion, and not right now when gso_skb - * is freed by GSO engine - */ - if (copy_destructor) { - swap(gso_skb->sk, skb->sk); - swap(gso_skb->destructor, skb->destructor); - swap(gso_skb->truesize, skb->truesize); - } - - delta = htonl(oldlen + (skb_tail_pointer(skb) - - skb_transport_header(skb)) + - skb->data_len); - th->check = ~csum_fold((__force __wsum)((__force u32)th->check + - (__force u32)delta)); - if (skb->ip_summed != CHECKSUM_PARTIAL) - th->check = csum_fold(csum_partial(skb_transport_header(skb), - thlen, skb->csum)); - -out: - return segs; -} -EXPORT_SYMBOL(tcp_tso_segment); - -struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) -{ - struct sk_buff **pp = NULL; - struct sk_buff *p; - struct tcphdr *th; - struct tcphdr *th2; - unsigned int len; - unsigned int thlen; - __be32 flags; - unsigned int mss = 1; - unsigned int hlen; - unsigned int off; - int flush = 1; - int i; - - off = skb_gro_offset(skb); - hlen = off + sizeof(*th); - th = skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, hlen)) { - th = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!th)) - goto out; - } - - thlen = th->doff * 4; - if (thlen < sizeof(*th)) - goto out; - - hlen = off + thlen; - if (skb_gro_header_hard(skb, hlen)) { - th = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!th)) - goto out; - } - - skb_gro_pull(skb, thlen); - - len = skb_gro_len(skb); - flags = tcp_flag_word(th); - - for (; (p = *head); head = &p->next) { - if (!NAPI_GRO_CB(p)->same_flow) - continue; - - th2 = tcp_hdr(p); - - if (*(u32 *)&th->source ^ *(u32 *)&th2->source) { - NAPI_GRO_CB(p)->same_flow = 0; - continue; - } - - goto found; - } - - goto out_check_final; - -found: - flush = NAPI_GRO_CB(p)->flush; - flush |= (__force int)(flags & TCP_FLAG_CWR); - flush |= (__force int)((flags ^ tcp_flag_word(th2)) & - ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); - flush |= (__force int)(th->ack_seq ^ th2->ack_seq); - for (i = sizeof(*th); i < thlen; i += 4) - flush |= *(u32 *)((u8 *)th + i) ^ - *(u32 *)((u8 *)th2 + i); - - mss = tcp_skb_mss(p); - - flush |= (len - 1) >= mss; - flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); - - if (flush || skb_gro_receive(head, skb)) { - mss = 1; - goto out_check_final; - } - - p = *head; - th2 = tcp_hdr(p); - tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); - -out_check_final: - flush = len < mss; - flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH | - TCP_FLAG_RST | TCP_FLAG_SYN | - TCP_FLAG_FIN)); - - if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) - pp = head; - -out: - NAPI_GRO_CB(skb)->flush |= flush; - - return pp; -} -EXPORT_SYMBOL(tcp_gro_receive); - -int tcp_gro_complete(struct sk_buff *skb) -{ - struct tcphdr *th = tcp_hdr(skb); - - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct tcphdr, check); - skb->ip_summed = CHECKSUM_PARTIAL; - - skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; - - if (th->cwr) - skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; - - return 0; -} -EXPORT_SYMBOL(tcp_gro_complete); - #ifdef CONFIG_TCP_MD5SIG static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; static DEFINE_MUTEX(tcp_md5sig_mutex); diff --git 
a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d20ede0c9593..289039b4d8de 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -545,8 +545,7 @@ out: sock_put(sk); } -static void __tcp_v4_send_check(struct sk_buff *skb, - __be32 saddr, __be32 daddr) +void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr) { struct tcphdr *th = tcp_hdr(skb); @@ -571,23 +570,6 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(tcp_v4_send_check); -int tcp_v4_gso_send_check(struct sk_buff *skb) -{ - const struct iphdr *iph; - struct tcphdr *th; - - if (!pskb_may_pull(skb, sizeof(*th))) - return -EINVAL; - - iph = ip_hdr(skb); - th = tcp_hdr(skb); - - th->check = 0; - skb->ip_summed = CHECKSUM_PARTIAL; - __tcp_v4_send_check(skb, iph->saddr, iph->daddr); - return 0; -} - /* * This routine will send an RST to the other tcp. * @@ -2795,52 +2777,6 @@ void tcp4_proc_exit(void) } #endif /* CONFIG_PROC_FS */ -struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) -{ - const struct iphdr *iph = skb_gro_network_header(skb); - __wsum wsum; - __sum16 sum; - - switch (skb->ip_summed) { - case CHECKSUM_COMPLETE: - if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, - skb->csum)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } -flush: - NAPI_GRO_CB(skb)->flush = 1; - return NULL; - - case CHECKSUM_NONE: - wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr, - skb_gro_len(skb), IPPROTO_TCP, 0); - sum = csum_fold(skb_checksum(skb, - skb_gro_offset(skb), - skb_gro_len(skb), - wsum)); - if (sum) - goto flush; - - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } - - return tcp_gro_receive(head, skb); -} - -int tcp4_gro_complete(struct sk_buff *skb) -{ - const struct iphdr *iph = ip_hdr(skb); - struct tcphdr *th = tcp_hdr(skb); - - th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), - iph->saddr, iph->daddr, 0); - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - - return tcp_gro_complete(skb); -} - struct proto tcp_prot = { .name = "TCP", .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c new file mode 100644 index 000000000000..3a7525e6c086 --- /dev/null +++ b/net/ipv4/tcp_offload.c @@ -0,0 +1,332 @@ +/* + * IPV4 GSO/GRO offload support + * Linux INET implementation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * TCPv4 GSO/GRO support + */ + +#include +#include +#include + +struct sk_buff *tcp_tso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + struct tcphdr *th; + unsigned int thlen; + unsigned int seq; + __be32 delta; + unsigned int oldlen; + unsigned int mss; + struct sk_buff *gso_skb = skb; + __sum16 newcheck; + bool ooo_okay, copy_destructor; + + if (!pskb_may_pull(skb, sizeof(*th))) + goto out; + + th = tcp_hdr(skb); + thlen = th->doff * 4; + if (thlen < sizeof(*th)) + goto out; + + if (!pskb_may_pull(skb, thlen)) + goto out; + + oldlen = (u16)~skb->len; + __skb_pull(skb, thlen); + + mss = tcp_skb_mss(skb); + if (unlikely(skb->len <= mss)) + goto out; + + if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { + /* Packet is from an untrusted source, reset gso_segs. 
*/ + int type = skb_shinfo(skb)->gso_type; + + if (unlikely(type & + ~(SKB_GSO_TCPV4 | + SKB_GSO_DODGY | + SKB_GSO_TCP_ECN | + SKB_GSO_TCPV6 | + SKB_GSO_GRE | + SKB_GSO_MPLS | + SKB_GSO_UDP_TUNNEL | + 0) || + !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) + goto out; + + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); + + segs = NULL; + goto out; + } + + copy_destructor = gso_skb->destructor == tcp_wfree; + ooo_okay = gso_skb->ooo_okay; + /* All segments but the first should have ooo_okay cleared */ + skb->ooo_okay = 0; + + segs = skb_segment(skb, features); + if (IS_ERR(segs)) + goto out; + + /* Only first segment might have ooo_okay set */ + segs->ooo_okay = ooo_okay; + + delta = htonl(oldlen + (thlen + mss)); + + skb = segs; + th = tcp_hdr(skb); + seq = ntohl(th->seq); + + newcheck = ~csum_fold((__force __wsum)((__force u32)th->check + + (__force u32)delta)); + + do { + th->fin = th->psh = 0; + th->check = newcheck; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + th->check = + csum_fold(csum_partial(skb_transport_header(skb), + thlen, skb->csum)); + + seq += mss; + if (copy_destructor) { + skb->destructor = gso_skb->destructor; + skb->sk = gso_skb->sk; + /* {tcp|sock}_wfree() use exact truesize accounting : + * sum(skb->truesize) MUST be exactly be gso_skb->truesize + * So we account mss bytes of 'true size' for each segment. + * The last segment will contain the remaining. + */ + skb->truesize = mss; + gso_skb->truesize -= mss; + } + skb = skb->next; + th = tcp_hdr(skb); + + th->seq = htonl(seq); + th->cwr = 0; + } while (skb->next); + + /* Following permits TCP Small Queues to work well with GSO : + * The callback to TCP stack will be called at the time last frag + * is freed at TX completion, and not right now when gso_skb + * is freed by GSO engine + */ + if (copy_destructor) { + swap(gso_skb->sk, skb->sk); + swap(gso_skb->destructor, skb->destructor); + swap(gso_skb->truesize, skb->truesize); + } + + delta = htonl(oldlen + (skb_tail_pointer(skb) - + skb_transport_header(skb)) + + skb->data_len); + th->check = ~csum_fold((__force __wsum)((__force u32)th->check + + (__force u32)delta)); + if (skb->ip_summed != CHECKSUM_PARTIAL) + th->check = csum_fold(csum_partial(skb_transport_header(skb), + thlen, skb->csum)); +out: + return segs; +} +EXPORT_SYMBOL(tcp_tso_segment); + +struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) +{ + struct sk_buff **pp = NULL; + struct sk_buff *p; + struct tcphdr *th; + struct tcphdr *th2; + unsigned int len; + unsigned int thlen; + __be32 flags; + unsigned int mss = 1; + unsigned int hlen; + unsigned int off; + int flush = 1; + int i; + + off = skb_gro_offset(skb); + hlen = off + sizeof(*th); + th = skb_gro_header_fast(skb, off); + if (skb_gro_header_hard(skb, hlen)) { + th = skb_gro_header_slow(skb, hlen, off); + if (unlikely(!th)) + goto out; + } + + thlen = th->doff * 4; + if (thlen < sizeof(*th)) + goto out; + + hlen = off + thlen; + if (skb_gro_header_hard(skb, hlen)) { + th = skb_gro_header_slow(skb, hlen, off); + if (unlikely(!th)) + goto out; + } + + skb_gro_pull(skb, thlen); + + len = skb_gro_len(skb); + flags = tcp_flag_word(th); + + for (; (p = *head); head = &p->next) { + if (!NAPI_GRO_CB(p)->same_flow) + continue; + + th2 = tcp_hdr(p); + + if (*(u32 *)&th->source ^ *(u32 *)&th2->source) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + + goto found; + } + + goto out_check_final; + +found: + flush = NAPI_GRO_CB(p)->flush; + flush |= (__force int)(flags & TCP_FLAG_CWR); + flush |= (__force int)((flags ^ 
tcp_flag_word(th2)) & + ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); + flush |= (__force int)(th->ack_seq ^ th2->ack_seq); + for (i = sizeof(*th); i < thlen; i += 4) + flush |= *(u32 *)((u8 *)th + i) ^ + *(u32 *)((u8 *)th2 + i); + + mss = tcp_skb_mss(p); + + flush |= (len - 1) >= mss; + flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); + + if (flush || skb_gro_receive(head, skb)) { + mss = 1; + goto out_check_final; + } + + p = *head; + th2 = tcp_hdr(p); + tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); + +out_check_final: + flush = len < mss; + flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH | + TCP_FLAG_RST | TCP_FLAG_SYN | + TCP_FLAG_FIN)); + + if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) + pp = head; + +out: + NAPI_GRO_CB(skb)->flush |= flush; + + return pp; +} +EXPORT_SYMBOL(tcp_gro_receive); + +int tcp_gro_complete(struct sk_buff *skb) +{ + struct tcphdr *th = tcp_hdr(skb); + + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); + skb->ip_summed = CHECKSUM_PARTIAL; + + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; + + if (th->cwr) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + + return 0; +} +EXPORT_SYMBOL(tcp_gro_complete); + +static int tcp_v4_gso_send_check(struct sk_buff *skb) +{ + const struct iphdr *iph; + struct tcphdr *th; + + if (!pskb_may_pull(skb, sizeof(*th))) + return -EINVAL; + + iph = ip_hdr(skb); + th = tcp_hdr(skb); + + th->check = 0; + skb->ip_summed = CHECKSUM_PARTIAL; + __tcp_v4_send_check(skb, iph->saddr, iph->daddr); + return 0; +} + +static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) +{ + const struct iphdr *iph = skb_gro_network_header(skb); + __wsum wsum; + __sum16 sum; + + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, + skb->csum)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } +flush: + NAPI_GRO_CB(skb)->flush = 1; + return NULL; + + case CHECKSUM_NONE: + wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr, + skb_gro_len(skb), IPPROTO_TCP, 0); + sum = csum_fold(skb_checksum(skb, + skb_gro_offset(skb), + skb_gro_len(skb), + wsum)); + if (sum) + goto flush; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } + + return tcp_gro_receive(head, skb); +} + +static int tcp4_gro_complete(struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), + iph->saddr, iph->daddr, 0); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + return tcp_gro_complete(skb); +} + +static const struct net_offload tcpv4_offload = { + .callbacks = { + .gso_send_check = tcp_v4_gso_send_check, + .gso_segment = tcp_tso_segment, + .gro_receive = tcp4_gro_receive, + .gro_complete = tcp4_gro_complete, + }, +}; + +int __init tcpv4_offload_init(void) +{ + return inet_add_offload(&tcpv4_offload, IPPROTO_TCP); +} -- cgit v1.2.3 From 1d2f41ed23343e083566339574807ca7ea75dbba Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 5 Jun 2013 23:54:35 +0000 Subject: macvlan: switch to use IS_ENABLED() Acked-by: Michael S. Tsirkin Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- include/linux/if_macvlan.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 84dde1dd1da4..e47ad46a477c 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -8,7 +8,7 @@ #include #include -#if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE) +#if IS_ENABLED(CONFIG_MACVTAP) struct socket *macvtap_get_socket(struct file *); #else #include -- cgit v1.2.3 From f0afce01aa639e5164cf73f063b81a8b95619c3a Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 5 Jun 2013 23:54:37 +0000 Subject: macvlan: change the max number of queues to 16 Macvtap should be at least compatible with tap, so change the max number to 16. Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- include/linux/if_macvlan.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index e47ad46a477c..62d8bda67874 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -50,7 +50,7 @@ struct macvlan_pcpu_stats { * Maximum times a macvtap device can be opened. This can be used to * configure the number of receive queue, e.g. for multiqueue virtio. */ -#define MAX_MACVTAP_QUEUES (NR_CPUS < 16 ? NR_CPUS : 16) +#define MAX_MACVTAP_QUEUES 16 #define MACVLAN_MC_FILTER_BITS 8 #define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) -- cgit v1.2.3 From 815f236d622721b54f3963ba59dad98b02cdeabf Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 5 Jun 2013 23:54:39 +0000 Subject: macvtap: add TUNSETQUEUE ioctl This patch adds a TUNSETQUEUE ioctl so that userspace can temporarily disable or enable a macvtap queue. This keeps macvtap compatible with the tuntap API and simplifies queue management from userspace. It is implemented by introducing a linked list that tracks all taps, while the vlan->taps array tracks only the active taps. Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S.
Miller --- drivers/net/macvtap.c | 135 ++++++++++++++++++++++++++++++++++++++------- include/linux/if_macvlan.h | 4 ++ 2 files changed, 120 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 5ccba99b15f4..d2d1d5578b61 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -45,6 +45,8 @@ struct macvtap_queue { struct file *file; unsigned int flags; u16 queue_index; + bool enabled; + struct list_head next; }; static struct proto macvtap_proto = { @@ -85,14 +87,36 @@ static const struct proto_ops macvtap_socket_ops; */ static DEFINE_SPINLOCK(macvtap_lock); -static int macvtap_set_queue(struct net_device *dev, struct file *file, +static int macvtap_enable_queue(struct net_device *dev, struct file *file, struct macvtap_queue *q) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + int err = -EINVAL; + + spin_lock(&macvtap_lock); + + if (q->enabled) + goto out; + + err = 0; + rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); + q->queue_index = vlan->numvtaps; + q->enabled = true; + + vlan->numvtaps++; +out: + spin_unlock(&macvtap_lock); + return err; +} + +static int macvtap_set_queue(struct net_device *dev, struct file *file, + struct macvtap_queue *q) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EBUSY; spin_lock(&macvtap_lock); - if (vlan->numvtaps == MAX_MACVTAP_QUEUES) + if (vlan->numqueues == MAX_MACVTAP_QUEUES) goto out; err = 0; @@ -102,15 +126,57 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file, q->file = file; q->queue_index = vlan->numvtaps; + q->enabled = true; file->private_data = q; + list_add_tail(&q->next, &vlan->queue_list); vlan->numvtaps++; + vlan->numqueues++; out: spin_unlock(&macvtap_lock); return err; } +static int __macvtap_disable_queue(struct macvtap_queue *q) +{ + struct macvlan_dev *vlan; + struct macvtap_queue *nq; + + vlan = rcu_dereference_protected(q->vlan, + lockdep_is_held(&macvtap_lock)); + + if (!q->enabled) + return -EINVAL; + + if (vlan) { + int index = q->queue_index; + BUG_ON(index >= vlan->numvtaps); + nq = rcu_dereference_protected(vlan->taps[vlan->numvtaps - 1], + lockdep_is_held(&macvtap_lock)); + nq->queue_index = index; + + rcu_assign_pointer(vlan->taps[index], nq); + RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL); + q->enabled = false; + + vlan->numvtaps--; + } + + return 0; +} + +static int macvtap_disable_queue(struct macvtap_queue *q) +{ + int err; + + spin_lock(&macvtap_lock); + err = __macvtap_disable_queue(q); + spin_unlock(&macvtap_lock); + + return err; +} + /* * The file owning the queue got closed, give up both * the reference that the files holds as well as the @@ -121,25 +187,19 @@ out: */ static void macvtap_put_queue(struct macvtap_queue *q) { - struct macvtap_queue *nq; struct macvlan_dev *vlan; spin_lock(&macvtap_lock); vlan = rcu_dereference_protected(q->vlan, lockdep_is_held(&macvtap_lock)); if (vlan) { - int index = q->queue_index; - BUG_ON(index >= vlan->numvtaps); - - nq = rcu_dereference_protected(vlan->taps[vlan->numvtaps - 1], - lockdep_is_held(&macvtap_lock)); - rcu_assign_pointer(vlan->taps[index], nq); - nq->queue_index = index; + if (q->enabled) + BUG_ON(__macvtap_disable_queue(q)); - RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL); + vlan->numqueues--; RCU_INIT_POINTER(q->vlan, NULL); sock_put(&q->sk); - --vlan->numvtaps; + list_del_init(&q->next); } spin_unlock(&macvtap_lock); @@ -160,6 +220,11 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, { struct macvlan_dev 
*vlan = netdev_priv(dev); struct macvtap_queue *tap = NULL; + /* Access to taps array is protected by rcu, but access to numvtaps + * isn't. Below we use it to lookup a queue, but treat it as a hint + * and validate that the result isn't NULL - in case we are + * racing against queue removal. + */ int numvtaps = ACCESS_ONCE(vlan->numvtaps); __u32 rxq; @@ -196,18 +261,22 @@ out: static void macvtap_del_queues(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); - struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES]; + struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES]; int i, j = 0; spin_lock(&macvtap_lock); - for (i = 0; i < vlan->numvtaps; i++) { - q = rcu_dereference_protected(vlan->taps[i], - lockdep_is_held(&macvtap_lock)); - BUG_ON(q == NULL); + list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) { + list_del_init(&q->next); qlist[j++] = q; - RCU_INIT_POINTER(vlan->taps[i], NULL); RCU_INIT_POINTER(q->vlan, NULL); + if (q->enabled) + vlan->numvtaps--; + vlan->numqueues--; } + for (i = 0; i < vlan->numvtaps; i++) + RCU_INIT_POINTER(vlan->taps[i], NULL); + BUG_ON(vlan->numvtaps); + BUG_ON(vlan->numqueues); /* guarantee that any future macvtap_set_queue will fail */ vlan->numvtaps = MAX_MACVTAP_QUEUES; spin_unlock(&macvtap_lock); @@ -298,6 +367,9 @@ static int macvtap_newlink(struct net *src_net, struct nlattr *tb[], struct nlattr *data[]) { + struct macvlan_dev *vlan = netdev_priv(dev); + INIT_LIST_HEAD(&vlan->queue_list); + /* Don't put anything that may fail after macvlan_common_newlink * because we can't undo what it does. */ @@ -887,6 +959,25 @@ static void macvtap_put_vlan(struct macvlan_dev *vlan) dev_put(vlan->dev); } +static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags) +{ + struct macvtap_queue *q = file->private_data; + struct macvlan_dev *vlan; + int ret; + + vlan = macvtap_get_vlan(q); + if (!vlan) + return -EINVAL; + + if (flags & IFF_ATTACH_QUEUE) + ret = macvtap_enable_queue(vlan->dev, file, q); + else if (flags & IFF_DETACH_QUEUE) + ret = macvtap_disable_queue(q); + + macvtap_put_vlan(vlan); + return ret; +} + /* * provide compatibility with generic tun/tap interface */ @@ -910,7 +1001,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, return -EFAULT; ret = 0; - if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP)) + if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) != + (IFF_NO_PI | IFF_TAP)) ret = -EINVAL; else q->flags = u; @@ -929,6 +1021,11 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, macvtap_put_vlan(vlan); return ret; + case TUNSETQUEUE: + if (get_user(u, &ifr->ifr_flags)) + return -EFAULT; + return macvtap_ioctl_set_queue(file, u); + case TUNGETFEATURES: if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up)) return -EFAULT; diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 62d8bda67874..19121331a5ed 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -69,8 +69,12 @@ struct macvlan_dev { u16 flags; int (*receive)(struct sk_buff *skb); int (*forward)(struct net_device *dev, struct sk_buff *skb); + /* This array tracks active taps. 
*/ struct macvtap_queue *taps[MAX_MACVTAP_QUEUES]; + /* This list tracks all taps (both enabled and disabled) */ + struct list_head queue_list; int numvtaps; + int numqueues; int minor; }; -- cgit v1.2.3 From af12fa6e46aa651e7b86a4c4117b562518fef184 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Mon, 10 Jun 2013 11:39:41 +0300 Subject: net: add napi_id and hash Adds a napi_id and a hashing mechanism to lookup a napi by id. This will be used by subsequent patches to implement low latency Ethernet device polling. Based on a code sample by Eric Dumazet. Signed-off-by: Eliezer Tamir Signed-off-by: Eric Dumazet Tested-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/netdevice.h | 29 +++++++++++++++++++++++ net/core/dev.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8f967e34142b..39bbd462d68e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -324,12 +324,15 @@ struct napi_struct { struct sk_buff *gro_list; struct sk_buff *skb; struct list_head dev_list; + struct hlist_node napi_hash_node; + unsigned int napi_id; }; enum { NAPI_STATE_SCHED, /* Poll is scheduled */ NAPI_STATE_DISABLE, /* Disable pending */ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ + NAPI_STATE_HASHED, /* In NAPI hash */ }; enum gro_result { @@ -445,6 +448,32 @@ static inline bool napi_reschedule(struct napi_struct *napi) extern void __napi_complete(struct napi_struct *n); extern void napi_complete(struct napi_struct *n); +/** + * napi_by_id - lookup a NAPI by napi_id + * @napi_id: hashed napi_id + * + * lookup @napi_id in napi_hash table + * must be called under rcu_read_lock() + */ +extern struct napi_struct *napi_by_id(unsigned int napi_id); + +/** + * napi_hash_add - add a NAPI to global hashtable + * @napi: napi context + * + * generate a new napi_id and store a @napi under it in napi_hash + */ +extern void napi_hash_add(struct napi_struct *napi); + +/** + * napi_hash_del - remove a NAPI from global table + * @napi: napi context + * + * Warning: caller must observe rcu grace period + * before freeing memory containing @napi + */ +extern void napi_hash_del(struct napi_struct *napi); + /** * napi_disable - prevent NAPI from scheduling * @n: napi context diff --git a/net/core/dev.c b/net/core/dev.c index 9c18557f93c6..fa007dba6beb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -129,6 +129,7 @@ #include #include #include +#include #include "net-sysfs.h" @@ -166,6 +167,12 @@ static struct list_head offload_base __read_mostly; DEFINE_RWLOCK(dev_base_lock); EXPORT_SYMBOL(dev_base_lock); +/* protects napi_hash addition/deletion and napi_gen_id */ +static DEFINE_SPINLOCK(napi_hash_lock); + +static unsigned int napi_gen_id; +static DEFINE_HASHTABLE(napi_hash, 8); + seqcount_t devnet_rename_seq; static inline void dev_base_seq_inc(struct net *net) @@ -4136,6 +4143,58 @@ void napi_complete(struct napi_struct *n) } EXPORT_SYMBOL(napi_complete); +/* must be called under rcu_read_lock(), as we dont take a reference */ +struct napi_struct *napi_by_id(unsigned int napi_id) +{ + unsigned int hash = napi_id % HASH_SIZE(napi_hash); + struct napi_struct *napi; + + hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) + if (napi->napi_id == napi_id) + return napi; + + return NULL; +} +EXPORT_SYMBOL_GPL(napi_by_id); + +void napi_hash_add(struct napi_struct *napi) +{ + if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) { + + 
spin_lock(&napi_hash_lock); + + /* 0 is not a valid id, we also skip an id that is taken + * we expect both events to be extremely rare + */ + napi->napi_id = 0; + while (!napi->napi_id) { + napi->napi_id = ++napi_gen_id; + if (napi_by_id(napi->napi_id)) + napi->napi_id = 0; + } + + hlist_add_head_rcu(&napi->napi_hash_node, + &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); + + spin_unlock(&napi_hash_lock); + } +} +EXPORT_SYMBOL_GPL(napi_hash_add); + +/* Warning : caller is responsible to make sure rcu grace period + * is respected before freeing memory containing @napi + */ +void napi_hash_del(struct napi_struct *napi) +{ + spin_lock(&napi_hash_lock); + + if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) + hlist_del_rcu(&napi->napi_hash_node); + + spin_unlock(&napi_hash_lock); +} +EXPORT_SYMBOL_GPL(napi_hash_del); + void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { -- cgit v1.2.3 From 060212928670593fb89243640bf05cf89560b023 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Mon, 10 Jun 2013 11:39:50 +0300 Subject: net: add low latency socket poll Adds an ndo_ll_poll method and the code that supports it. This method can be used by low latency applications to busy-poll Ethernet device queues directly from the socket code. sysctl_net_ll_poll controls how many microseconds to poll. Default is zero (disabled). Individual protocol support will be added by subsequent patches. Signed-off-by: Alexander Duyck Signed-off-by: Jesse Brandeburg Signed-off-by: Eliezer Tamir Acked-by: Eric Dumazet Tested-by: Willem de Bruijn Signed-off-by: David S. Miller --- Documentation/sysctl/net.txt | 7 ++ include/linux/netdevice.h | 3 + include/linux/skbuff.h | 8 ++- include/net/ll_poll.h | 148 +++++++++++++++++++++++++++++++++++++++++++ include/net/sock.h | 4 ++ include/uapi/linux/snmp.h | 1 + net/Kconfig | 12 ++++ net/core/skbuff.c | 4 ++ net/core/sock.c | 6 ++ net/core/sysctl_net_core.c | 10 +++ net/ipv4/proc.c | 1 + net/socket.c | 6 ++ 12 files changed, 208 insertions(+), 2 deletions(-) create mode 100644 include/net/ll_poll.h (limited to 'include') diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index c1f8640c2fc8..85ab72dcdc3c 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -50,6 +50,13 @@ The maximum number of packets that kernel can handle on a NAPI interrupt, it's a Per-CPU variable. Default: 64 +low_latency_poll +---------------- +Low latency busy poll timeout. (needs CONFIG_NET_LL_RX_POLL) +Approximate time in us to spin waiting for packets on the device queue. +Recommended value is 50. May increase power usage. 
+Default: 0 (off) + rmem_default ------------ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 39bbd462d68e..2ecb96d9a1e5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -971,6 +971,9 @@ struct net_device_ops { struct netpoll_info *info, gfp_t gfp); void (*ndo_netpoll_cleanup)(struct net_device *dev); +#endif +#ifdef CONFIG_NET_LL_RX_POLL + int (*ndo_ll_poll)(struct napi_struct *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, int queue, u8 *mac); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9995834d2cb6..400d82ae2b03 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -386,6 +386,7 @@ typedef unsigned char *sk_buff_data_t; * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions + * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark * @dropcount: total number of sk_receive_queue overflows @@ -500,8 +501,11 @@ struct sk_buff { /* 7/9 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); -#ifdef CONFIG_NET_DMA - dma_cookie_t dma_cookie; +#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL + union { + unsigned int napi_id; + dma_cookie_t dma_cookie; + }; #endif #ifdef CONFIG_NETWORK_SECMARK __u32 secmark; diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h new file mode 100644 index 000000000000..bc262f88173f --- /dev/null +++ b/include/net/ll_poll.h @@ -0,0 +1,148 @@ +/* + * Low Latency Sockets + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Author: Eliezer Tamir + * + * Contact Information: + * e1000-devel Mailing List + */ + +/* + * For now this depends on CONFIG_X86_TSC + */ + +#ifndef _LINUX_NET_LL_POLL_H +#define _LINUX_NET_LL_POLL_H + +#include +#include + +#ifdef CONFIG_NET_LL_RX_POLL + +struct napi_struct; +extern unsigned long sysctl_net_ll_poll __read_mostly; + +/* return values from ndo_ll_poll */ +#define LL_FLUSH_FAILED -1 +#define LL_FLUSH_BUSY -2 + +/* we don't mind a ~2.5% imprecision */ +#define TSC_MHZ (tsc_khz >> 10) + +static inline cycles_t ll_end_time(void) +{ + return TSC_MHZ * ACCESS_ONCE(sysctl_net_ll_poll) + get_cycles(); +} + +static inline bool sk_valid_ll(struct sock *sk) +{ + return sysctl_net_ll_poll && sk->sk_napi_id && + !need_resched() && !signal_pending(current); +} + +static inline bool can_poll_ll(cycles_t end_time) +{ + return !time_after((unsigned long)get_cycles(), + (unsigned long)end_time); +} + +static inline bool sk_poll_ll(struct sock *sk, int nonblock) +{ + cycles_t end_time = ll_end_time(); + const struct net_device_ops *ops; + struct napi_struct *napi; + int rc = false; + + /* + * rcu read lock for napi hash + * bh so we don't race with net_rx_action + */ + rcu_read_lock_bh(); + + napi = napi_by_id(sk->sk_napi_id); + if (!napi) + goto out; + + ops = napi->dev->netdev_ops; + if (!ops->ndo_ll_poll) + goto out; + + do { + + rc = ops->ndo_ll_poll(napi); + + if (rc == LL_FLUSH_FAILED) + break; /* permanent failure */ + + if (rc > 0) + /* local bh are disabled so it is ok to use _BH */ + NET_ADD_STATS_BH(sock_net(sk), + LINUX_MIB_LOWLATENCYRXPACKETS, rc); + + } while (skb_queue_empty(&sk->sk_receive_queue) + && can_poll_ll(end_time) && !nonblock); + + rc = !skb_queue_empty(&sk->sk_receive_queue); +out: + rcu_read_unlock_bh(); + return rc; +} + +/* used in the NIC receive handler to mark the skb */ +static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) +{ + skb->napi_id = napi->napi_id; +} + +/* used in the protocol hanlder to propagate the napi_id to the socket */ +static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) +{ + sk->sk_napi_id = skb->napi_id; +} + +#else /* CONFIG_NET_LL_RX_POLL */ + +static inline cycles_t ll_end_time(void) +{ + return 0; +} + +static inline bool sk_valid_ll(struct sock *sk) +{ + return false; +} + +static inline bool sk_poll_ll(struct sock *sk, int nonblock) +{ + return false; +} + +static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) +{ +} + +static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) +{ +} + +static inline bool can_poll_ll(cycles_t end_time) +{ + return false; +} + +#endif /* CONFIG_NET_LL_RX_POLL */ +#endif /* _LINUX_NET_LL_POLL_H */ diff --git a/include/net/sock.h b/include/net/sock.h index 66772cf8c3c5..ac8e1818380c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -229,6 +229,7 @@ struct cg_proto; * @sk_omem_alloc: "o" is "option" or "other" * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward + * @sk_napi_id: id of the last napi context to receive data for sk * @sk_allocation: allocation mode * @sk_sndbuf: size of send buffer in bytes * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, @@ -324,6 +325,9 @@ struct sock { int sk_forward_alloc; #ifdef CONFIG_RPS __u32 sk_rxhash; +#endif +#ifdef CONFIG_NET_LL_RX_POLL + unsigned int sk_napi_id; #endif atomic_t sk_drops; int sk_rcvbuf; diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index df2e8b4f9c03..26cbf76f8058 100644 --- 
a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -253,6 +253,7 @@ enum LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ + LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ __LINUX_MIB_MAX }; diff --git a/net/Kconfig b/net/Kconfig index 523e43e6da1b..d6a9ce6e1800 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -243,6 +243,18 @@ config NETPRIO_CGROUP Cgroup subsystem for use in assigning processes to network priorities on a per-interface basis +config NET_LL_RX_POLL + bool "Low Latency Receive Poll" + depends on X86_TSC + default n + ---help--- + Support Low Latency Receive Queue Poll. + (For network card drivers which support this option.) + When waiting for data in read or poll call directly into the the device driver + to flush packets which may be pending on the device queues into the stack. + + If unsure, say N. + config BQL boolean depends on SYSFS diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 73f57a0e1523..4a4181e16c1a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -733,6 +733,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->vlan_tci = old->vlan_tci; skb_copy_secmark(new, old); + +#ifdef CONFIG_NET_LL_RX_POLL + new->napi_id = old->napi_id; +#endif } /* diff --git a/net/core/sock.c b/net/core/sock.c index 88868a9d21da..788c0da5eed1 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -139,6 +139,8 @@ #include #endif +#include + static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); @@ -2284,6 +2286,10 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_stamp = ktime_set(-1L, 0); +#ifdef CONFIG_NET_LL_RX_POLL + sk->sk_napi_id = 0; +#endif + /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 741db5fc7806..4b48f39582b0 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -19,6 +19,7 @@ #include #include #include +#include static int one = 1; @@ -284,6 +285,15 @@ static struct ctl_table net_core_table[] = { .proc_handler = flow_limit_table_len_sysctl }, #endif /* CONFIG_NET_FLOW_LIMIT */ +#ifdef CONFIG_NET_LL_RX_POLL + { + .procname = "low_latency_poll", + .data = &sysctl_net_ll_poll, + .maxlen = sizeof(unsigned long), + .mode = 0644, + .proc_handler = proc_doulongvec_minmax + }, +#endif #endif /* CONFIG_NET */ { .procname = "netdev_budget", diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 2a5bf86d2415..6577a1149a47 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -273,6 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), + SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), SNMP_MIB_SENTINEL }; diff --git a/net/socket.c b/net/socket.c index 3ebdcb805c51..21fd29f63ed2 100644 --- a/net/socket.c +++ b/net/socket.c @@ -104,6 +104,12 @@ #include #include #include +#include + +#ifdef CONFIG_NET_LL_RX_POLL +unsigned long sysctl_net_ll_poll __read_mostly; +EXPORT_SYMBOL_GPL(sysctl_net_ll_poll); +#endif static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t 
sock_aio_read(struct kiocb *iocb, const struct iovec *iov, -- cgit v1.2.3 From 30f3a40f9a2a2869a560a9cb9ef488d10c803e14 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 5 Jun 2013 20:14:10 +0800 Subject: net: remove last caller of skb_tail_offset() and itself Similar to the following commits: commit 00f97da17a0c8d656d0c9 (netpoll: fix position of network header) commit 525cebedb32a87fa48584 (pktgen: Fix position of ip and udp header) using skb_tail_offset() seems not correct since the offset is based on head pointer. With the last caller removed, skb_tail_offset() can be killed finally. Cc: Thomas Graf Cc: Daniel Borkmann Cc: David S. Miller Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- include/linux/skbuff.h | 8 -------- net/ipv4/ipmr.c | 8 +------- 2 files changed, 1 insertion(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 400d82ae2b03..a7393adea0b5 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1396,10 +1396,6 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) skb->tail += offset; } -static inline unsigned long skb_tail_offset(const struct sk_buff *skb) -{ - return skb->tail; -} #else /* NET_SKBUFF_DATA_USES_OFFSET */ static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) { @@ -1416,10 +1412,6 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) skb->tail = skb->data + offset; } -static inline unsigned long skb_tail_offset(const struct sk_buff *skb) -{ - return skb->tail - skb->head; -} #endif /* NET_SKBUFF_DATA_USES_OFFSET */ /* diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index df97f0ac1a1c..132a09664704 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -945,7 +945,6 @@ static int ipmr_cache_report(struct mr_table *mrt, struct igmpmsg *msg; struct sock *mroute_sk; int ret; - unsigned long tail_offset; #ifdef CONFIG_IP_PIMSM if (assert == IGMPMSG_WHOLEPKT) @@ -981,12 +980,7 @@ static int ipmr_cache_report(struct mr_table *mrt, /* Copy the IP header */ - tail_offset = skb_tail_offset(skb); - if (tail_offset > 0xffff) { - kfree_skb(skb); - return -EINVAL; - } - skb_set_network_header(skb, tail_offset); + skb_set_network_header(skb, skb->len); skb_put(skb, ihl); skb_copy_to_linear_data(skb, pkt->data, ihl); ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ -- cgit v1.2.3 From 9ba18891f75535eca3ef53138b48970eb60f5255 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Wed, 5 Jun 2013 10:08:00 -0400 Subject: bridge: Add flag to control mac learning. Allow user to control whether mac learning is enabled on the port. By default, mac learning is enabled. Disabling mac learning will cause new dynamic FDB entries to not be created for a particular port. Signed-off-by: Vlad Yasevich Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 1 + net/bridge/br_if.c | 2 +- net/bridge/br_input.c | 6 ++++-- net/bridge/br_netlink.c | 6 +++++- net/bridge/br_private.h | 1 + net/bridge/br_sysfs_if.c | 2 ++ 6 files changed, 14 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index b05823cae784..8643809d8417 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -221,6 +221,7 @@ enum { IFLA_BRPORT_GUARD, /* bpdu guard */ IFLA_BRPORT_PROTECT, /* root port protection */ IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ + IFLA_BRPORT_LEARNING, /* mac learning */ __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 4cdba60926ff..2c08911df578 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -221,7 +221,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, p->path_cost = port_cost(dev); p->priority = 0x8000 >> BR_PORT_BITS; p->port_no = index; - p->flags = 0; + p->flags = BR_LEARNING; br_init_port(p); p->state = BR_STATE_DISABLED; br_stp_port_timer_init(p); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 828e2bcc1f52..7e993667d4bf 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -75,7 +75,8 @@ int br_handle_frame_finish(struct sk_buff *skb) /* insert into forwarding database after filtering to avoid spoofing */ br = p->br; - br_fdb_update(br, p, eth_hdr(skb)->h_source, vid); + if (p->flags & BR_LEARNING) + br_fdb_update(br, p, eth_hdr(skb)->h_source, vid); if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) && br_multicast_rcv(br, p, skb)) @@ -142,7 +143,8 @@ static int br_handle_local_finish(struct sk_buff *skb) u16 vid = 0; br_vlan_get_tag(skb, &vid); - br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid); + if (p->flags & BR_LEARNING) + br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid); return 0; /* process further */ } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 8e3abf564798..ce902bf8a618 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -30,6 +30,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_GUARD */ + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + 0; } @@ -56,7 +57,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, nla_put_u8(skb, IFLA_BRPORT_MODE, mode) || nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) || nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) || - nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE))) + nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) || + nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING))) return -EMSGSIZE; return 0; @@ -281,6 +283,7 @@ static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, + [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ @@ -328,6 +331,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); + 
br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 1b0ac95a5c37..04d7f43508f7 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -158,6 +158,7 @@ struct net_bridge_port #define BR_ROOT_BLOCK 0x00000004 #define BR_MULTICAST_FAST_LEAVE 0x00000008 #define BR_ADMIN_COST 0x00000010 +#define BR_LEARNING 0x00000020 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING u32 multicast_startup_queries_sent; diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index a1ef1b6e14dc..707f3628e9cd 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -158,6 +158,7 @@ static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush); BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE); BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD); BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); +BRPORT_ATTR_FLAG(learning, BR_LEARNING); #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) @@ -195,6 +196,7 @@ static const struct brport_attribute *brport_attrs[] = { &brport_attr_hairpin_mode, &brport_attr_bpdu_guard, &brport_attr_root_block, + &brport_attr_learning, #ifdef CONFIG_BRIDGE_IGMP_SNOOPING &brport_attr_multicast_router, &brport_attr_multicast_fast_leave, -- cgit v1.2.3 From 867a59436fc35593ae0e0efcd56cc6d2f8506586 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Wed, 5 Jun 2013 10:08:01 -0400 Subject: bridge: Add a flag to control unicast packet flood. Add a flag to control flood of unicast traffic. By default, flood is on and the bridge will flood unicast traffic if it doesn't know the destination. When the flag is turned off, unicast traffic without an FDB will not be forwarded to the specified port. Signed-off-by: Vlad Yasevich Reviewed-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 1 + net/bridge/br_device.c | 8 ++++---- net/bridge/br_forward.c | 14 +++++++++----- net/bridge/br_if.c | 2 +- net/bridge/br_input.c | 9 ++++++--- net/bridge/br_netlink.c | 6 +++++- net/bridge/br_private.h | 6 ++++-- net/bridge/br_sysfs_if.c | 2 ++ 8 files changed, 32 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 8643809d8417..da05a2698cb5 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -222,6 +222,7 @@ enum { IFLA_BRPORT_PROTECT, /* root port protection */ IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ IFLA_BRPORT_LEARNING, /* mac learning */ + IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 75f3239130f8..2ef66781fedb 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -58,10 +58,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) skb_pull(skb, ETH_HLEN); if (is_broadcast_ether_addr(dest)) - br_flood_deliver(br, skb); + br_flood_deliver(br, skb, false); else if (is_multicast_ether_addr(dest)) { if (unlikely(netpoll_tx_running(dev))) { - br_flood_deliver(br, skb); + br_flood_deliver(br, skb, false); goto out; } if (br_multicast_rcv(br, NULL, skb)) { @@ -73,11 +73,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) br_multicast_deliver(mdst, skb); else - br_flood_deliver(br, skb); + br_flood_deliver(br, skb, false); } else if ((dst = __br_fdb_get(br, dest, vid)) != NULL) br_deliver(dst->dst, skb); else - br_flood_deliver(br, skb); + br_flood_deliver(br, skb, true); out: rcu_read_unlock(); diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 092b20e4ee4c..4b81b1471789 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -174,7 +174,8 @@ out: static void br_flood(struct net_bridge *br, struct sk_buff *skb, struct sk_buff *skb0, void (*__packet_hook)(const struct net_bridge_port *p, - struct sk_buff *skb)) + struct sk_buff *skb), + bool unicast) { struct net_bridge_port *p; struct net_bridge_port *prev; @@ -182,6 +183,9 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb, prev = NULL; list_for_each_entry_rcu(p, &br->port_list, list) { + /* Do not flood unicast traffic to ports that turn it off */ + if (unicast && !(p->flags & BR_FLOOD)) + continue; prev = maybe_deliver(prev, p, skb, __packet_hook); if (IS_ERR(prev)) goto out; @@ -203,16 +207,16 @@ out: /* called with rcu_read_lock */ -void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb) +void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast) { - br_flood(br, skb, NULL, __br_deliver); + br_flood(br, skb, NULL, __br_deliver, unicast); } /* called under bridge lock */ void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, - struct sk_buff *skb2) + struct sk_buff *skb2, bool unicast) { - br_flood(br, skb, skb2, __br_forward); + br_flood(br, skb, skb2, __br_forward, unicast); } #ifdef CONFIG_BRIDGE_IGMP_SNOOPING diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 2c08911df578..5623be6b9ecd 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -221,7 +221,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, p->path_cost = port_cost(dev); p->priority = 0x8000 >> BR_PORT_BITS; p->port_no = index; - p->flags = 
BR_LEARNING; + p->flags = BR_LEARNING | BR_FLOOD; br_init_port(p); p->state = BR_STATE_DISABLED; br_stp_port_timer_init(p); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 7e993667d4bf..1b8b8b824cd7 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -65,6 +65,7 @@ int br_handle_frame_finish(struct sk_buff *skb) struct net_bridge_fdb_entry *dst; struct net_bridge_mdb_entry *mdst; struct sk_buff *skb2; + bool unicast = true; u16 vid = 0; if (!p || p->state == BR_STATE_DISABLED) @@ -95,9 +96,10 @@ int br_handle_frame_finish(struct sk_buff *skb) dst = NULL; - if (is_broadcast_ether_addr(dest)) + if (is_broadcast_ether_addr(dest)) { skb2 = skb; - else if (is_multicast_ether_addr(dest)) { + unicast = false; + } else if (is_multicast_ether_addr(dest)) { mdst = br_mdb_get(br, skb, vid); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { if ((mdst && mdst->mglist) || @@ -110,6 +112,7 @@ int br_handle_frame_finish(struct sk_buff *skb) } else skb2 = skb; + unicast = false; br->dev->stats.multicast++; } else if ((dst = __br_fdb_get(br, dest, vid)) && dst->is_local) { @@ -123,7 +126,7 @@ int br_handle_frame_finish(struct sk_buff *skb) dst->used = jiffies; br_forward(dst->dst, skb, skb2); } else - br_flood_forward(br, skb, skb2); + br_flood_forward(br, skb, skb2, unicast); } if (skb2) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index ce902bf8a618..1fc30abd3a52 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -31,6 +31,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + 0; } @@ -58,7 +59,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) || nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) || nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) || - nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING))) + nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) || + nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD))) return -EMSGSIZE; return 0; @@ -284,6 +286,7 @@ static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, + [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ @@ -332,6 +335,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); + br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 04d7f43508f7..3be89b3ce17b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -159,6 +159,7 @@ struct net_bridge_port #define BR_MULTICAST_FAST_LEAVE 0x00000008 #define BR_ADMIN_COST 0x00000010 #define BR_LEARNING 0x00000020 +#define BR_FLOOD 0x00000040 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING u32 multicast_startup_queries_sent; @@ -414,9 +415,10 @@ extern int br_dev_queue_push_xmit(struct sk_buff *skb); 
extern void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0); extern int br_forward_finish(struct sk_buff *skb); -extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb); +extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, + bool unicast); extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, - struct sk_buff *skb2); + struct sk_buff *skb2, bool unicast); /* br_if.c */ extern void br_port_carrier_check(struct net_bridge_port *p); diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 707f3628e9cd..2a2cdb756d51 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -159,6 +159,7 @@ BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE); BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD); BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); BRPORT_ATTR_FLAG(learning, BR_LEARNING); +BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD); #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) @@ -197,6 +198,7 @@ static const struct brport_attribute *brport_attrs[] = { &brport_attr_bpdu_guard, &brport_attr_root_block, &brport_attr_learning, + &brport_attr_unicast_flood, #ifdef CONFIG_BRIDGE_IGMP_SNOOPING &brport_attr_multicast_router, &brport_attr_multicast_fast_leave, -- cgit v1.2.3 From da12c90e099789a63073fc82a19542ce54d4efb9 Mon Sep 17 00:00:00 2001 From: Gao feng Date: Thu, 6 Jun 2013 14:49:11 +0800 Subject: netlink: Add compare function for netlink_table Netlink sockets are a private resource of a network namespace; they can communicate with each other only when they are in the same net namespace. This works well until we try to add namespace support to other subsystems that use netlink. Unlike IPv4 and the routing tables, subsystems such as audit and crypto do not fit naturally into the network namespace; they are better suited to the user namespace. So we need a way to let netlink sockets in the same user namespace communicate with each other. This patch adds a new function pointer "compare" to netlink_table; through this per-table compare function we can decide whether two netlink sockets may communicate. The behavior is unchanged if no compare function is provided for a netlink_table. Signed-off-by: Gao feng Acked-by: Serge E. Hallyn Signed-off-by: David S. 
Miller --- include/linux/netlink.h | 1 + net/netlink/af_netlink.c | 33 +++++++++++++++++++++++++-------- net/netlink/af_netlink.h | 1 + 3 files changed, 27 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 6358da5eeee8..f78b430f4af5 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -46,6 +46,7 @@ struct netlink_kernel_cfg { void (*input)(struct sk_buff *skb); struct mutex *cb_mutex; void (*bind)(int group); + bool (*compare)(struct net *net, struct sock *sk); }; extern struct sock *__netlink_kernel_create(struct net *net, int unit, diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 68c167374394..9b6b115e008f 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -858,16 +858,23 @@ netlink_unlock_table(void) wake_up(&nl_table_wait); } +static bool netlink_compare(struct net *net, struct sock *sk) +{ + return net_eq(sock_net(sk), net); +} + static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) { - struct nl_portid_hash *hash = &nl_table[protocol].hash; + struct netlink_table *table = &nl_table[protocol]; + struct nl_portid_hash *hash = &table->hash; struct hlist_head *head; struct sock *sk; read_lock(&nl_table_lock); head = nl_portid_hashfn(hash, portid); sk_for_each(sk, head) { - if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) { + if (table->compare(net, sk) && + (nlk_sk(sk)->portid == portid)) { sock_hold(sk); goto found; } @@ -980,7 +987,8 @@ netlink_update_listeners(struct sock *sk) static int netlink_insert(struct sock *sk, struct net *net, u32 portid) { - struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; + struct netlink_table *table = &nl_table[sk->sk_protocol]; + struct nl_portid_hash *hash = &table->hash; struct hlist_head *head; int err = -EADDRINUSE; struct sock *osk; @@ -990,7 +998,8 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid) head = nl_portid_hashfn(hash, portid); len = 0; sk_for_each(osk, head) { - if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid)) + if (table->compare(net, osk) && + (nlk_sk(osk)->portid == portid)) break; len++; } @@ -1165,6 +1174,7 @@ static int netlink_release(struct socket *sock) kfree_rcu(old, rcu); nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].bind = NULL; + nl_table[sk->sk_protocol].compare = NULL; nl_table[sk->sk_protocol].flags = 0; nl_table[sk->sk_protocol].registered = 0; } @@ -1187,7 +1197,8 @@ static int netlink_autobind(struct socket *sock) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); - struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; + struct netlink_table *table = &nl_table[sk->sk_protocol]; + struct nl_portid_hash *hash = &table->hash; struct hlist_head *head; struct sock *osk; s32 portid = task_tgid_vnr(current); @@ -1199,7 +1210,7 @@ retry: netlink_table_grab(); head = nl_portid_hashfn(hash, portid); sk_for_each(osk, head) { - if (!net_eq(sock_net(osk), net)) + if (!table->compare(net, osk)) continue; if (nlk_sk(osk)->portid == portid) { /* Bind collision, search negative portid values. 
*/ @@ -2315,9 +2326,12 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, rcu_assign_pointer(nl_table[unit].listeners, listeners); nl_table[unit].cb_mutex = cb_mutex; nl_table[unit].module = module; + nl_table[unit].compare = netlink_compare; if (cfg) { nl_table[unit].bind = cfg->bind; nl_table[unit].flags = cfg->flags; + if (cfg->compare) + nl_table[unit].compare = cfg->compare; } nl_table[unit].registered = 1; } else { @@ -2740,6 +2754,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *s; struct nl_seq_iter *iter; + struct net *net; int i, j; ++*pos; @@ -2747,11 +2762,12 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (v == SEQ_START_TOKEN) return netlink_seq_socket_idx(seq, 0); + net = seq_file_net(seq); iter = seq->private; s = v; do { s = sk_next(s); - } while (s && sock_net(s) != seq_file_net(seq)); + } while (s && !nl_table[s->sk_protocol].compare(net, s)); if (s) return s; @@ -2763,7 +2779,8 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) for (; j <= hash->mask; j++) { s = sk_head(&hash->table[j]); - while (s && sock_net(s) != seq_file_net(seq)) + + while (s && !nl_table[s->sk_protocol].compare(net, s)) s = sk_next(s); if (s) { iter->link = i; diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index ed8522265f4e..eaa88d187cdc 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -73,6 +73,7 @@ struct netlink_table { struct mutex *cb_mutex; struct module *module; void (*bind)(int group); + bool (*compare)(struct net *net, struct sock *sock); int registered; }; -- cgit v1.2.3 From 45203a3b380cee28f570475c0d28c169f908c209 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 6 Jun 2013 08:43:22 -0700 Subject: net_sched: add 64bit rate estimators struct gnet_stats_rate_est contains u32 fields, so the bytes per second field can wrap at 34360Mbit. Add a new gnet_stats_rate_est64 structure to get 64bit bps/pps fields, and switch the kernel to use this structure natively. This structure is dumped to user space as a new attribute : TCA_STATS_RATE_EST64 Old tc command will now display the capped bps (to 34360Mbit), instead of wrapped values, and updated tc command will display correct information. Old tc command output, after patch : eric:~# tc -s -d qd sh dev lo qdisc pfifo 8001: root refcnt 2 limit 1000p Sent 80868245400 bytes 1978837 pkt (dropped 0, overlimits 0 requeues 0) rate 34360Mbit 189696pps backlog 0b 0p requeues 0 This patch carefully reorganizes "struct Qdisc" layout to get optimal performance on SMP. Signed-off-by: Eric Dumazet Cc: Ben Hutchings Signed-off-by: David S. 
Miller --- include/net/act_api.h | 2 +- include/net/gen_stats.h | 10 +++++----- include/net/netfilter/xt_rateest.h | 2 +- include/net/sch_generic.h | 13 +++++++------ include/uapi/linux/gen_stats.h | 11 +++++++++++ net/core/gen_estimator.c | 12 ++++++------ net/core/gen_stats.c | 22 +++++++++++++++++----- net/netfilter/xt_rateest.c | 2 +- net/sched/sch_cbq.c | 2 +- net/sched/sch_drr.c | 2 +- net/sched/sch_hfsc.c | 2 +- net/sched/sch_htb.c | 2 +- net/sched/sch_qfq.c | 2 +- 13 files changed, 54 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/net/act_api.h b/include/net/act_api.h index 06ef7e926a66..b8ffac7b6bab 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -18,7 +18,7 @@ struct tcf_common { struct tcf_t tcfc_tm; struct gnet_stats_basic_packed tcfc_bstats; struct gnet_stats_queue tcfc_qstats; - struct gnet_stats_rate_est tcfc_rate_est; + struct gnet_stats_rate_est64 tcfc_rate_est; spinlock_t tcfc_lock; struct rcu_head tcfc_rcu; }; diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index a79b6cfb02a8..cf8439ba4d11 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -30,7 +30,7 @@ extern int gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b); extern int gnet_stats_copy_rate_est(struct gnet_dump *d, const struct gnet_stats_basic_packed *b, - struct gnet_stats_rate_est *r); + struct gnet_stats_rate_est64 *r); extern int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q); extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); @@ -38,13 +38,13 @@ extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); extern int gnet_stats_finish_copy(struct gnet_dump *d); extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_rate_est *rate_est, + struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt); extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_rate_est *rate_est); + struct gnet_stats_rate_est64 *rate_est); extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_rate_est *rate_est, + struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt); extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, - const struct gnet_stats_rate_est *rate_est); + const struct gnet_stats_rate_est64 *rate_est); #endif diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h index 5a2978d1cb22..495c71f66e7e 100644 --- a/include/net/netfilter/xt_rateest.h +++ b/include/net/netfilter/xt_rateest.h @@ -6,7 +6,7 @@ struct xt_rateest { struct gnet_stats_basic_packed bstats; spinlock_t lock; /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */ - struct gnet_stats_rate_est rstats; + struct gnet_stats_rate_est64 rstats; /* following fields not accessed in hot path */ struct hlist_node list; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index e7f4e21cc3e1..df5676029827 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -58,14 +58,12 @@ struct Qdisc { * multiqueue device. 
*/ #define TCQ_F_WARN_NONWC (1 << 16) - int padded; + u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table __rcu *stab; struct list_head list; u32 handle; u32 parent; - atomic_t refcnt; - struct gnet_stats_rate_est rate_est; int (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q); @@ -76,8 +74,9 @@ struct Qdisc { */ struct Qdisc *__parent; struct netdev_queue *dev_queue; - struct Qdisc *next_sched; + struct gnet_stats_rate_est64 rate_est; + struct Qdisc *next_sched; struct sk_buff *gso_skb; /* * For performance sake on SMP, we put highly modified fields at the end @@ -88,8 +87,10 @@ struct Qdisc { unsigned int __state; struct gnet_stats_queue qstats; struct rcu_head rcu_head; - spinlock_t busylock; - u32 limit; + int padded; + atomic_t refcnt; + + spinlock_t busylock ____cacheline_aligned_in_smp; }; static inline bool qdisc_is_running(const struct Qdisc *qdisc) diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h index 552c8a0a12d1..6487317ea619 100644 --- a/include/uapi/linux/gen_stats.h +++ b/include/uapi/linux/gen_stats.h @@ -9,6 +9,7 @@ enum { TCA_STATS_RATE_EST, TCA_STATS_QUEUE, TCA_STATS_APP, + TCA_STATS_RATE_EST64, __TCA_STATS_MAX, }; #define TCA_STATS_MAX (__TCA_STATS_MAX - 1) @@ -37,6 +38,16 @@ struct gnet_stats_rate_est { __u32 pps; }; +/** + * struct gnet_stats_rate_est64 - rate estimator + * @bps: current byte rate + * @pps: current packet rate + */ +struct gnet_stats_rate_est64 { + __u64 bps; + __u64 pps; +}; + /** * struct gnet_stats_queue - queuing statistics * @qlen: queue length diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index d9d198aa9fed..6b5b6e7013ca 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -82,7 +82,7 @@ struct gen_estimator { struct list_head list; struct gnet_stats_basic_packed *bstats; - struct gnet_stats_rate_est *rate_est; + struct gnet_stats_rate_est64 *rate_est; spinlock_t *stats_lock; int ewma_log; u64 last_bytes; @@ -167,7 +167,7 @@ static void gen_add_node(struct gen_estimator *est) static struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats, - const struct gnet_stats_rate_est *rate_est) + const struct gnet_stats_rate_est64 *rate_est) { struct rb_node *p = est_root.rb_node; @@ -203,7 +203,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats * */ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_rate_est *rate_est, + struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt) { @@ -258,7 +258,7 @@ EXPORT_SYMBOL(gen_new_estimator); * Note : Caller should respect an RCU grace period before freeing stats_lock */ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_rate_est *rate_est) + struct gnet_stats_rate_est64 *rate_est) { struct gen_estimator *e; @@ -290,7 +290,7 @@ EXPORT_SYMBOL(gen_kill_estimator); * Returns 0 on success or a negative error code. */ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_rate_est *rate_est, + struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt) { gen_kill_estimator(bstats, rate_est); @@ -306,7 +306,7 @@ EXPORT_SYMBOL(gen_replace_estimator); * Returns true if estimator is active, and false if not. 
*/ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, - const struct gnet_stats_rate_est *rate_est) + const struct gnet_stats_rate_est64 *rate_est) { bool res; diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index ddedf211e588..9d3d9e78397b 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -143,18 +143,30 @@ EXPORT_SYMBOL(gnet_stats_copy_basic); int gnet_stats_copy_rate_est(struct gnet_dump *d, const struct gnet_stats_basic_packed *b, - struct gnet_stats_rate_est *r) + struct gnet_stats_rate_est64 *r) { + struct gnet_stats_rate_est est; + int res; + if (b && !gen_estimator_active(b, r)) return 0; + est.bps = min_t(u64, UINT_MAX, r->bps); + /* we have some time before reaching 2^32 packets per second */ + est.pps = r->pps; + if (d->compat_tc_stats) { - d->tc_stats.bps = r->bps; - d->tc_stats.pps = r->pps; + d->tc_stats.bps = est.bps; + d->tc_stats.pps = est.pps; } - if (d->tail) - return gnet_stats_copy(d, TCA_STATS_RATE_EST, r, sizeof(*r)); + if (d->tail) { + res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est)); + if (res < 0 || est.bps == r->bps) + return res; + /* emit 64bit stats only if needed */ + return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r)); + } return 0; } diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c index ed0db15ab00e..7720b036d76a 100644 --- a/net/netfilter/xt_rateest.c +++ b/net/netfilter/xt_rateest.c @@ -18,7 +18,7 @@ static bool xt_rateest_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_rateest_match_info *info = par->matchinfo; - struct gnet_stats_rate_est *r; + struct gnet_stats_rate_est64 *r; u_int32_t bps1, bps2, pps1, pps2; bool ret = true; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 1bc210ffcba2..71a568862557 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -130,7 +130,7 @@ struct cbq_class { psched_time_t penalized; struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; - struct gnet_stats_rate_est rate_est; + struct gnet_stats_rate_est64 rate_est; struct tc_cbq_xstats xstats; struct tcf_proto *filter_list; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 759b308d1a8d..8302717ea303 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -25,7 +25,7 @@ struct drr_class { struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; - struct gnet_stats_rate_est rate_est; + struct gnet_stats_rate_est64 rate_est; struct list_head alist; struct Qdisc *qdisc; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 9facea03faeb..c4075610502c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -114,7 +114,7 @@ struct hfsc_class { struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; - struct gnet_stats_rate_est rate_est; + struct gnet_stats_rate_est64 rate_est; unsigned int level; /* class level in hierarchy */ struct tcf_proto *filter_list; /* filter list */ unsigned int filter_cnt; /* filter count */ diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index adaedd79389c..162fb800754c 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -78,7 +78,7 @@ struct htb_class { /* general class parameters */ struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; - struct gnet_stats_rate_est rate_est; + struct gnet_stats_rate_est64 rate_est; struct tc_htb_xstats xstats; /* our special stats */ int refcnt; /* usage count of this class */ diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index d51852bba01c..7c195d972bf0 
100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -138,7 +138,7 @@ struct qfq_class { struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; - struct gnet_stats_rate_est rate_est; + struct gnet_stats_rate_est64 rate_est; struct Qdisc *qdisc; struct list_head alist; /* Link for active-classes list. */ struct qfq_aggregate *agg; /* Parent aggregate. */ -- cgit v1.2.3 From a0a2af765543a96bdf188ec57dd201e708195302 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 3 Jun 2013 18:10:45 +0200 Subject: nl80211: add kernel-doc for NL80211_FEATURE_ACTIVE_MONITOR Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5920715278c2..1f0019d4cce6 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3579,6 +3579,10 @@ enum nl80211_ap_sme_features { * Peering Management entity which may be implemented by registering for * beacons or NL80211_CMD_NEW_PEER_CANDIDATE events. The mesh beacon is * still generated by the driver. + * @NL80211_FEATURE_ACTIVE_MONITOR: This driver supports an active monitor + * interface. An active monitor interface behaves like a normal monitor + * interface, but gets added to the driver. It ensures that incoming + * unicast packets directed at the configured interface address get ACKed. */ enum nl80211_feature_flags { NL80211_FEATURE_SK_TX_STATUS = 1 << 0, -- cgit v1.2.3 From 8e7c053853b7d299e8a2b8733659b0df8eee51f7 Mon Sep 17 00:00:00 2001 From: Colleen Twitty Date: Mon, 3 Jun 2013 09:53:39 -0700 Subject: {nl,cfg}80211: make peer link expiration time configurable If a STA has a peer that it hasn't seen any tx activity from for a certain length of time, the peer link is expired. This means the inactive STA is removed from the list of peers and that STA is not considered a peer again unless it re-peers. Previously, this inactivity time was always 30 minutes. Now, add it to the mesh configuration and allow it to be configured. Retain 30 minutes as a default value. Signed-off-by: Colleen Twitty Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 4 ++++ include/uapi/linux/nl80211.h | 5 +++++ net/wireless/mesh.c | 2 ++ net/wireless/nl80211.c | 8 +++++++- 4 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 855c76ccff38..31ca11672ca8 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1124,6 +1124,9 @@ struct bss_parameters { * setting for new peer links. * @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake * after transmitting its beacon. + * @plink_timeout: If no tx activity is seen from a STA we've established + * peering with for longer than this time (in seconds), then remove it + * from the STA's list of peers. Default is 30 minutes. 
*/ struct mesh_config { u16 dot11MeshRetryTimeout; @@ -1153,6 +1156,7 @@ struct mesh_config { u16 dot11MeshHWMPconfirmationInterval; enum nl80211_mesh_power_mode power_mode; u16 dot11MeshAwakeWindowDuration; + u32 plink_timeout; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 1f0019d4cce6..ca6facf4df0c 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2577,6 +2577,10 @@ enum nl80211_mesh_power_mode { * * @NL80211_MESHCONF_AWAKE_WINDOW: awake window duration (in TUs) * + * @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've + * established peering with for longer than this time (in seconds), then + * remove it from the STA's list of peers. Default is 30 minutes. + * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ enum nl80211_meshconf_params { @@ -2608,6 +2612,7 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, NL80211_MESHCONF_POWER_MODE, NL80211_MESHCONF_AWAKE_WINDOW, + NL80211_MESHCONF_PLINK_TIMEOUT, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 5dfb289ab761..0daaf72e1b81 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -18,6 +18,7 @@ #define MESH_PATH_TO_ROOT_TIMEOUT 6000 #define MESH_ROOT_INTERVAL 5000 #define MESH_ROOT_CONFIRMATION_INTERVAL 2000 +#define MESH_DEFAULT_PLINK_TIMEOUT 1800 /* timeout in seconds */ /* * Minimum interval between two consecutive PREQs originated by the same @@ -75,6 +76,7 @@ const struct mesh_config default_mesh_config = { .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL, .power_mode = NL80211_MESH_POWER_ACTIVE, .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW, + .plink_timeout = MESH_DEFAULT_PLINK_TIMEOUT, }; const struct mesh_setup default_mesh_setup = { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 88e820b73674..8aa83c04d4eb 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4575,7 +4575,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, nla_put_u32(msg, NL80211_MESHCONF_POWER_MODE, cur_params.power_mode) || nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW, - cur_params.dot11MeshAwakeWindowDuration)) + cur_params.dot11MeshAwakeWindowDuration) || + nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT, + cur_params.plink_timeout)) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); @@ -4616,6 +4618,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 }, [NL80211_MESHCONF_POWER_MODE] = { .type = NLA_U32 }, [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 }, + [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, }; static const struct nla_policy @@ -4753,6 +4756,9 @@ do { \ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, 0, 65535, mask, NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 1, 0xffffffff, + mask, NL80211_MESHCONF_PLINK_TIMEOUT, + nla_get_u32); if (mask_out) *mask_out = mask; -- cgit v1.2.3 From ffb3cf3000aa12facdccbdfcb10bfebda7199209 Mon Sep 17 00:00:00 2001 From: Ashok Nagarajan Date: Mon, 3 Jun 2013 10:33:36 -0700 Subject: {nl,mac,cfg}80211: Allow user to configure basic rates for mesh Currently mesh uses mandatory rates as the default basic rates. Allow basic rates to be configured during mesh join. Basic rates are applied only if channel is also provided with mesh join command. 
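Illustrative sketch, not part of the patch: a minimal user-space model of what the new basic_rates mask encodes. Each set bit selects one rate of the band, and a requested rate the band does not support is rejected. The example_* names are hypothetical and this is not the kernel's ieee80211_get_ratemask(); bitrates are given in 100 kbps units, as cfg80211 stores them.

    #include <stdint.h>
    #include <stdio.h>

    struct example_rate {
        uint16_t bitrate;   /* 100 kbps units, e.g. 60 == 6 Mbps */
    };

    /* Build a basic-rates bitmask: bit N selects the Nth rate of the band. */
    static int example_rates_to_mask(const struct example_rate *band, int n_band,
                                     const uint16_t *basic, int n_basic,
                                     uint32_t *mask)
    {
        int i, j;

        *mask = 0;
        for (i = 0; i < n_basic; i++) {
            for (j = 0; j < n_band; j++) {
                if (band[j].bitrate == basic[i]) {
                    *mask |= 1U << j;
                    break;
                }
            }
            if (j == n_band)
                return -1;  /* requested rate not supported by this band */
        }
        return 0;
    }

    int main(void)
    {
        /* 802.11a-style band: 6, 9, 12, 18, 24, 36, 48, 54 Mbps */
        const struct example_rate band[] = {
            { 60 }, { 90 }, { 120 }, { 180 },
            { 240 }, { 360 }, { 480 }, { 540 },
        };
        const uint16_t basic[] = { 60, 120, 240 };  /* 6, 12, 24 Mbps */
        uint32_t mask;

        if (example_rates_to_mask(band, 8, basic, 3, &mask) == 0)
            printf("basic_rates = 0x%x\n", mask);   /* prints 0x15 */
        return 0;
    }

If userspace supplies no basic rates, the patch falls back to the band's mandatory rates, so existing setups keep working.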
Signed-off-by: Ashok Nagarajan [some whitespace fixes, refuse basic rates w/o channel] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ net/mac80211/cfg.c | 1 + net/mac80211/mesh.c | 4 ---- net/wireless/mesh.c | 10 ++++++++++ net/wireless/nl80211.c | 17 +++++++++++++++++ 5 files changed, 30 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 31ca11672ca8..6a43c34ce96f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1176,6 +1176,7 @@ struct mesh_config { * @dtim_period: DTIM period to use * @beacon_interval: beacon interval to use * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a] + * @basic_rates: basic rates to use when creating the mesh * * These parameters are fixed when the mesh is created. */ @@ -1195,6 +1196,7 @@ struct mesh_setup { u8 dtim_period; u16 beacon_interval; int mcast_rate[IEEE80211_NUM_BANDS]; + u32 basic_rates; }; /** diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 344a57968079..cd6f35f6e714 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1759,6 +1759,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, /* mcast rate setting in Mesh Node */ memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate, sizeof(setup->mcast_rate)); + sdata->vif.bss_conf.basic_rates = setup->basic_rates; sdata->vif.bss_conf.beacon_int = setup->beacon_interval; sdata->vif.bss_conf.dtim_period = setup->dtim_period; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 4ee527f78677..6c33af482df4 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -738,9 +738,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); - struct ieee80211_supported_band *sband = - sdata->local->hw.wiphy->bands[band]; local->fif_other_bss++; /* mesh ifaces must set allmulti to forward mcast traffic */ @@ -758,7 +755,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) sdata->vif.bss_conf.ht_operation_mode = ifmsh->mshcfg.ht_opmode; sdata->vif.bss_conf.enable_beacon = true; - sdata->vif.bss_conf.basic_rates = ieee80211_mandatory_rates(sband); changed |= ieee80211_mps_local_status_update(sdata); diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 0daaf72e1b81..30c49202ee4d 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -162,6 +162,16 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, setup->chandef.center_freq1 = setup->chandef.chan->center_freq; } + /* + * check if basic rates are available otherwise use mandatory rates as + * basic rates + */ + if (!setup->basic_rates) { + struct ieee80211_supported_band *sband = + rdev->wiphy.bands[setup->chandef.chan->band]; + setup->basic_rates = ieee80211_mandatory_rates(sband); + } + if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef)) return -EINVAL; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8aa83c04d4eb..687cb6497598 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -7487,6 +7487,23 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) setup.chandef.chan = NULL; } + if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { + u8 *rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); + int n_rates = + nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); + struct ieee80211_supported_band *sband; + + if (!setup.chandef.chan) + return -EINVAL; + 
+ sband = rdev->wiphy.bands[setup.chandef.chan->band]; + + err = ieee80211_get_ratemask(sband, rates, n_rates, + &setup.basic_rates); + if (err) + return err; + } + return cfg80211_join_mesh(rdev, dev, &setup, &cfg); } -- cgit v1.2.3 From 130d3d68b52097c7ae081109f700b02776adcb9c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 6 Jun 2013 13:56:19 -0700 Subject: net_sched: psched_ratecfg_precompute() improvements Before allowing 64bits bytes rates, refactor psched_ratecfg_precompute() to get better comments and increased accuracy. rate_bps field is renamed to rate_bytes_ps, as we only have to worry about bytes per second. Signed-off-by: Eric Dumazet Cc: Ben Greear Signed-off-by: David S. Miller --- include/net/sch_generic.h | 4 ++-- net/sched/sch_generic.c | 44 ++++++++++++++++++++------------------------ 2 files changed, 22 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index df5676029827..6eab63363e59 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -680,7 +680,7 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask, #endif struct psched_ratecfg { - u64 rate_bps; + u64 rate_bytes_ps; /* bytes per second */ u32 mult; u16 overhead; u8 shift; @@ -698,7 +698,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, const struct psched_ratecfg *r) { memset(res, 0, sizeof(*res)); - res->rate = r->rate_bps >> 3; + res->rate = r->rate_bytes_ps; res->overhead = r->overhead; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 20224086cc28..4626cef4b76e 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -901,37 +901,33 @@ void dev_shutdown(struct net_device *dev) void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf) { - u64 factor; - u64 mult; - int shift; - memset(r, 0, sizeof(*r)); r->overhead = conf->overhead; - r->rate_bps = (u64)conf->rate << 3; + r->rate_bytes_ps = conf->rate; r->mult = 1; /* - * Calibrate mult, shift so that token counting is accurate - * for smallest packet size (64 bytes). Token (time in ns) is - * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps. It will - * work as long as the smallest packet transfer time can be - * accurately represented in nanosec. + * The deal here is to replace a divide by a reciprocal one + * in fast path (a reciprocal divide is a multiply and a shift) + * + * Normal formula would be : + * time_in_ns = (NSEC_PER_SEC * len) / rate_bps + * + * We compute mult/shift to use instead : + * time_in_ns = (len * mult) >> shift; + * + * We try to get the highest possible mult value for accuracy, + * but have to make sure no overflows will ever happen. */ - if (r->rate_bps > 0) { - /* - * Higher shift gives better accuracy. Find the largest - * shift such that mult fits in 32 bits. 
- */ - for (shift = 0; shift < 16; shift++) { - r->shift = shift; - factor = 8LLU * NSEC_PER_SEC * (1 << r->shift); - mult = div64_u64(factor, r->rate_bps); - if (mult > UINT_MAX) + if (r->rate_bytes_ps > 0) { + u64 factor = NSEC_PER_SEC; + + for (;;) { + r->mult = div64_u64(factor, r->rate_bytes_ps); + if (r->mult & (1U << 31) || factor & (1ULL << 63)) break; + factor <<= 1; + r->shift++; } - - r->shift = shift - 1; - factor = 8LLU * NSEC_PER_SEC * (1 << r->shift); - r->mult = div64_u64(factor, r->rate_bps); } } EXPORT_SYMBOL(psched_ratecfg_precompute); -- cgit v1.2.3 From e9897071350bd9d94a56b5b6f79c85b1a98fc7e7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 7 Jun 2013 08:48:57 -0700 Subject: igmp: hash a hash table to speedup ip_check_mc_rcu() After IP route cache removal, multicast applications using a lot of multicast addresses hit a O(N) behavior in ip_check_mc_rcu() Add a per in_device hash table to get faster lookup. This hash table is created only if the number of items in mc_list is above 4. Reported-by: Shawn Bohrer Signed-off-by: Eric Dumazet Tested-by: Shawn Bohrer Reviewed-by: Cong Wang Signed-off-by: David S. Miller --- include/linux/igmp.h | 1 + include/linux/inetdevice.h | 5 ++++ net/ipv4/devinet.c | 1 + net/ipv4/igmp.c | 73 ++++++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 77 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 7f2bf1518480..e3362b5f13e8 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -84,6 +84,7 @@ struct ip_mc_list { struct ip_mc_list *next; struct ip_mc_list __rcu *next_rcu; }; + struct ip_mc_list __rcu *next_hash; struct timer_list timer; int users; atomic_t refcnt; diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index ea1e3b863890..b99cd23f3474 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -50,12 +50,17 @@ struct ipv4_devconf { DECLARE_BITMAP(state, IPV4_DEVCONF_MAX); }; +#define MC_HASH_SZ_LOG 9 + struct in_device { struct net_device *dev; atomic_t refcnt; int dead; struct in_ifaddr *ifa_list; /* IP ifaddr chain */ + struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ + struct ip_mc_list __rcu * __rcu *mc_hash; + int mc_count; /* Number of installed mcasts */ spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index b047e2d8a614..3469506c106d 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -215,6 +215,7 @@ void in_dev_finish_destroy(struct in_device *idev) WARN_ON(idev->ifa_list); WARN_ON(idev->mc_list); + kfree(rcu_dereference_protected(idev->mc_hash, 1)); #ifdef NET_REFCNT_DEBUG pr_debug("%s: %p=%s\n", __func__, idev, dev ? 
dev->name : "NIL"); #endif diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 450f625361e4..f72011df9c59 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1217,6 +1217,57 @@ static void igmp_group_added(struct ip_mc_list *im) * Multicast list managers */ +static u32 ip_mc_hash(const struct ip_mc_list *im) +{ + return hash_32((u32)im->multiaddr, MC_HASH_SZ_LOG); +} + +static void ip_mc_hash_add(struct in_device *in_dev, + struct ip_mc_list *im) +{ + struct ip_mc_list __rcu **mc_hash; + u32 hash; + + mc_hash = rtnl_dereference(in_dev->mc_hash); + if (mc_hash) { + hash = ip_mc_hash(im); + im->next_hash = rtnl_dereference(mc_hash[hash]); + rcu_assign_pointer(mc_hash[hash], im); + return; + } + + /* do not use a hash table for small number of items */ + if (in_dev->mc_count < 4) + return; + + mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG, + GFP_KERNEL); + if (!mc_hash) + return; + + for_each_pmc_rtnl(in_dev, im) { + hash = ip_mc_hash(im); + im->next_hash = rtnl_dereference(mc_hash[hash]); + RCU_INIT_POINTER(mc_hash[hash], im); + } + + rcu_assign_pointer(in_dev->mc_hash, mc_hash); +} + +static void ip_mc_hash_remove(struct in_device *in_dev, + struct ip_mc_list *im) +{ + struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash); + struct ip_mc_list *aux; + + if (!mc_hash) + return; + mc_hash += ip_mc_hash(im); + while ((aux = rtnl_dereference(*mc_hash)) != im) + mc_hash = &aux->next_hash; + *mc_hash = im->next_hash; +} + /* * A socket has joined a multicast group on device dev. @@ -1258,6 +1309,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) in_dev->mc_count++; rcu_assign_pointer(in_dev->mc_list, im); + ip_mc_hash_add(in_dev, im); + #ifdef CONFIG_IP_MULTICAST igmpv3_del_delrec(in_dev, im->multiaddr); #endif @@ -1314,6 +1367,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) ip = &i->next_rcu) { if (i->multiaddr == addr) { if (--i->users == 0) { + ip_mc_hash_remove(in_dev, i); *ip = i->next_rcu; in_dev->mc_count--; igmp_group_dropped(i); @@ -2321,12 +2375,25 @@ void ip_mc_drop_socket(struct sock *sk) int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto) { struct ip_mc_list *im; + struct ip_mc_list __rcu **mc_hash; struct ip_sf_list *psf; int rv = 0; - for_each_pmc_rcu(in_dev, im) { - if (im->multiaddr == mc_addr) - break; + mc_hash = rcu_dereference(in_dev->mc_hash); + if (mc_hash) { + u32 hash = hash_32((u32)mc_addr, MC_HASH_SZ_LOG); + + for (im = rcu_dereference(mc_hash[hash]); + im != NULL; + im = rcu_dereference(im->next_hash)) { + if (im->multiaddr == mc_addr) + break; + } + } else { + for_each_pmc_rcu(in_dev, im) { + if (im->multiaddr == mc_addr) + break; + } } if (im && proto == IPPROTO_IGMP) { rv = 1; -- cgit v1.2.3 From da5bab079f9b7d90ba234965a14914ace55e45e9 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 8 Jun 2013 12:56:03 +0200 Subject: net: udp4: move GSO functions to udp_offload Similarly to TCP offloading and UDPv6 offloading, move all related UDPv4 functions to udp_offload.c to make things more explicit. Also, by this, we can make those functions static. Signed-off-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/net/udp.h | 7 ++-- net/ipv4/Makefile | 2 +- net/ipv4/af_inet.c | 9 +---- net/ipv4/udp.c | 75 +------------------------------------ net/ipv4/udp_offload.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 108 insertions(+), 85 deletions(-) create mode 100644 net/ipv4/udp_offload.c (limited to 'include') diff --git a/include/net/udp.h b/include/net/udp.h index 065f379c6503..b30a71a51839 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -187,6 +187,8 @@ extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); extern int udp_disconnect(struct sock *sk, int flags); extern unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); +extern struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, + netdev_features_t features); extern int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, @@ -262,11 +264,10 @@ extern int udp4_proc_init(void); extern void udp4_proc_exit(void); #endif +extern int udpv4_offload_init(void); + extern void udp_init(void); -extern int udp4_ufo_send_check(struct sk_buff *skb); -extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, - netdev_features_t features); extern void udp_encap_enable(void); #if IS_ENABLED(CONFIG_IPV6) extern void udpv6_encap_enable(void); diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 4d3e138c564f..7fcf8101d85f 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -9,7 +9,7 @@ obj-y := route.o inetpeer.o protocol.o \ tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \ tcp_offload.o datagram.o raw.o udp.o udplite.o \ - arp.o icmp.o devinet.o af_inet.o igmp.o \ + udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \ fib_frontend.o fib_semantics.o fib_trie.o \ inet_fragment.o ping.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 7b514290efc6..5598b06d62db 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1566,13 +1566,6 @@ static const struct net_protocol udp_protocol = { .netns_ok = 1, }; -static const struct net_offload udp_offload = { - .callbacks = { - .gso_send_check = udp4_ufo_send_check, - .gso_segment = udp4_ufo_fragment, - }, -}; - static const struct net_protocol icmp_protocol = { .handler = icmp_rcv, .err_handler = icmp_err, @@ -1672,7 +1665,7 @@ static int __init ipv4_offload_init(void) /* * Add offloads */ - if (inet_add_offload(&udp_offload, IPPROTO_UDP) < 0) + if (udpv4_offload_init() < 0) pr_crit("%s: Cannot add UDP protocol offload\n", __func__); if (tcpv4_offload_init() < 0) pr_crit("%s: Cannot add TCP protocol offload\n", __func__); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 2955b25aee6d..f65bc32c0266 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2290,29 +2290,8 @@ void __init udp_init(void) sysctl_udp_wmem_min = SK_MEM_QUANTUM; } -int udp4_ufo_send_check(struct sk_buff *skb) -{ - if (!pskb_may_pull(skb, sizeof(struct udphdr))) - return -EINVAL; - - if (likely(!skb->encapsulation)) { - const struct iphdr *iph; - struct udphdr *uh; - - iph = ip_hdr(skb); - uh = udp_hdr(skb); - - uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, - IPPROTO_UDP, 0); - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct udphdr, check); - skb->ip_summed = CHECKSUM_PARTIAL; - } - return 0; -} - -static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, - 
netdev_features_t features) +struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); int mac_len = skb->mac_len; @@ -2371,53 +2350,3 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, out: return segs; } - -struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, - netdev_features_t features) -{ - struct sk_buff *segs = ERR_PTR(-EINVAL); - unsigned int mss; - mss = skb_shinfo(skb)->gso_size; - if (unlikely(skb->len <= mss)) - goto out; - - if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { - /* Packet is from an untrusted source, reset gso_segs. */ - int type = skb_shinfo(skb)->gso_type; - - if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | - SKB_GSO_UDP_TUNNEL | - SKB_GSO_GRE | SKB_GSO_MPLS) || - !(type & (SKB_GSO_UDP)))) - goto out; - - skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); - - segs = NULL; - goto out; - } - - /* Fragment the skb. IP headers of the fragments are updated in - * inet_gso_segment() - */ - if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) - segs = skb_udp_tunnel_segment(skb, features); - else { - int offset; - __wsum csum; - - /* Do software UFO. Complete and fill in the UDP checksum as - * HW cannot do checksum of UDP packets sent as multiple - * IP fragments. - */ - offset = skb_checksum_start_offset(skb); - csum = skb_checksum(skb, offset, skb->len - offset, 0); - offset += skb->csum_offset; - *(__sum16 *)(skb->data + offset) = csum_fold(csum); - skb->ip_summed = CHECKSUM_NONE; - - segs = skb_segment(skb, features); - } -out: - return segs; -} diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c new file mode 100644 index 000000000000..f35eccaa855e --- /dev/null +++ b/net/ipv4/udp_offload.c @@ -0,0 +1,100 @@ +/* + * IPV4 GSO/GRO offload support + * Linux INET implementation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * UDPv4 GSO support + */ + +#include +#include +#include + +static int udp4_ufo_send_check(struct sk_buff *skb) +{ + if (!pskb_may_pull(skb, sizeof(struct udphdr))) + return -EINVAL; + + if (likely(!skb->encapsulation)) { + const struct iphdr *iph; + struct udphdr *uh; + + iph = ip_hdr(skb); + uh = udp_hdr(skb); + + uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, + IPPROTO_UDP, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + skb->ip_summed = CHECKSUM_PARTIAL; + } + + return 0; +} + +static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + unsigned int mss; + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(skb->len <= mss)) + goto out; + + if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { + /* Packet is from an untrusted source, reset gso_segs. */ + int type = skb_shinfo(skb)->gso_type; + + if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | + SKB_GSO_UDP_TUNNEL | + SKB_GSO_GRE | SKB_GSO_MPLS) || + !(type & (SKB_GSO_UDP)))) + goto out; + + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); + + segs = NULL; + goto out; + } + + /* Fragment the skb. 
IP headers of the fragments are updated in + * inet_gso_segment() + */ + if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) + segs = skb_udp_tunnel_segment(skb, features); + else { + int offset; + __wsum csum; + + /* Do software UFO. Complete and fill in the UDP checksum as + * HW cannot do checksum of UDP packets sent as multiple + * IP fragments. + */ + offset = skb_checksum_start_offset(skb); + csum = skb_checksum(skb, offset, skb->len - offset, 0); + offset += skb->csum_offset; + *(__sum16 *)(skb->data + offset) = csum_fold(csum); + skb->ip_summed = CHECKSUM_NONE; + + segs = skb_segment(skb, features); + } +out: + return segs; +} + +static const struct net_offload udpv4_offload = { + .callbacks = { + .gso_send_check = udp4_ufo_send_check, + .gso_segment = udp4_ufo_fragment, + }, +}; + +int __init udpv4_offload_init(void) +{ + return inet_add_offload(&udpv4_offload, IPPROTO_UDP); +} -- cgit v1.2.3 From dffebd2c5cd528a136b276a2a75c56222312d7a4 Mon Sep 17 00:00:00 2001 From: Narendra K Date: Mon, 10 Jun 2013 19:34:03 +0530 Subject: doc:networking: Update comment for dev_id field in netdevice.h This patch updates the comment for 'dev_id' field in 'include/linux/netdevice.h' to reflect the intended usage of 'dev_id'. References: http://marc.info/?l=linux-netdev&m=136992115300526&w=2 References: http://marc.info/?l=linux-netdev&m=137062569014612&w=2 Signed-off-by: Narendra K Reviewed-by: Ben Hutchings Signed-off-by: David S. Miller --- include/linux/netdevice.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2ecb96d9a1e5..e5d65573b4d6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1174,8 +1174,10 @@ struct net_device { unsigned char addr_assign_type; /* hw address assignment type */ unsigned char addr_len; /* hardware address length */ unsigned char neigh_priv_len; - unsigned short dev_id; /* for shared network cards */ - + unsigned short dev_id; /* Used to differentiate devices + * that share the same link + * layer address + */ spinlock_t addr_list_lock; struct netdev_hw_addr_list uc; /* Unicast mac addresses */ struct netdev_hw_addr_list mc; /* Multicast mac addresses */ -- cgit v1.2.3 From d80b35beac78b52faad2359adf6a6b14e2725e51 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 10 Jun 2013 17:42:24 +0200 Subject: team: use kfree_rcu instead of synchronize_rcu in team_port_dev Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/team/team.c | 3 +-- include/linux/if_team.h | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 9cbe09df7765..488ed4f535e5 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1192,8 +1192,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev) team_port_set_orig_dev_addr(port); dev_set_mtu(port_dev, port->orig.mtu); - synchronize_rcu(); - kfree(port); + kfree_rcu(port, rcu); netdev_info(dev, "Port device %s removed\n", portname); __team_compute_features(team); diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 4474557904f6..705459524706 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -69,6 +69,7 @@ struct team_port { s32 priority; /* lower number ~ higher priority */ u16 queue_id; struct list_head qom_list; /* node in queue override mapping list */ + struct rcu_head rcu; long mode_priv[0]; }; -- cgit v1.2.3 From 735d381fa57c573935d35a24ea271ec99897ac63 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 10 Jun 2013 17:42:25 +0200 Subject: team: remove synchronize_rcu() called during port disable Check the unlikely case of team->en_port_count == 0 before modulo operation. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/team/team.c | 20 +++++--------------- drivers/net/team/team_mode_loadbalance.c | 3 +-- drivers/net/team/team_mode_roundrobin.c | 3 ++- include/linux/if_team.h | 10 ++++++++++ 4 files changed, 18 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 488ed4f535e5..e46fef38685b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -525,31 +525,26 @@ static void team_set_no_mode(struct team *team) team->mode = &__team_no_mode; } -static void __team_adjust_ops(struct team *team, int en_port_count) +static void team_adjust_ops(struct team *team) { /* * To avoid checks in rx/tx skb paths, ensure here that non-null and * correct ops are always set. */ - if (!en_port_count || !team_is_mode_set(team) || + if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->transmit) team->ops.transmit = team_dummy_transmit; else team->ops.transmit = team->mode->ops->transmit; - if (!en_port_count || !team_is_mode_set(team) || + if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->receive) team->ops.receive = team_dummy_receive; else team->ops.receive = team->mode->ops->receive; } -static void team_adjust_ops(struct team *team) -{ - __team_adjust_ops(team, team->en_port_count); -} - /* * We can benefit from the fact that it's ensured no port is present * at the time of mode change. Therefore no packets are in fly so there's no @@ -877,14 +872,9 @@ static void team_port_disable(struct team *team, hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, port->index); port->index = -1; - team_queue_override_port_del(team, port); - __team_adjust_ops(team, team->en_port_count - 1); - /* - * Wait until readers see adjusted ops. 
This ensures that - * readers never see team->en_port_count == 0 - */ - synchronize_rcu(); team->en_port_count--; + team_queue_override_port_del(team, port); + team_adjust_ops(team); } #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index cdc31b5ea15e..829a9cd2b4da 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -112,9 +112,8 @@ static struct team_port *lb_hash_select_tx_port(struct team *team, struct sk_buff *skb, unsigned char hash) { - int port_index; + int port_index = team_num_to_port_index(team, hash); - port_index = hash % team->en_port_count; return team_get_port_by_index_rcu(team, port_index); } diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index d268e4de781b..90311c7375fb 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -30,7 +30,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb) struct team_port *port; int port_index; - port_index = rr_priv(team)->sent_packets++ % team->en_port_count; + port_index = team_num_to_port_index(team, + rr_priv(team)->sent_packets++); port = team_get_port_by_index_rcu(team, port_index); port = team_get_first_port_txable_rcu(team, port); if (unlikely(!port)) diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 705459524706..b662045a81c0 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -229,6 +229,16 @@ static inline struct team_port *team_get_port_by_index(struct team *team, return port; return NULL; } + +static inline int team_num_to_port_index(struct team *team, int num) +{ + int en_port_count = ACCESS_ONCE(team->en_port_count); + + if (unlikely(!en_port_count)) + return 0; + return num % en_port_count; +} + static inline struct team_port *team_get_port_by_index_rcu(struct team *team, int port_index) { -- cgit v1.2.3 From 503b47eafc39cb28db7b694aa0bb9276adccb89c Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Mon, 10 Jun 2013 18:58:16 +0300 Subject: ipv4: remove is_data also from ip_options documentation. commit ef722495c8867aacc1db0675a6737e5cf1e72e07 ( [IPV4]: Remove unused ip_options->is_data) removed the unused is_data member from ip_options struct. This patch removes is_data also from the documentation of the ip_options struct. Signed-off-by: Rami Rosen Signed-off-by: David S. Miller --- include/net/inet_sock.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 7235ae73a1e8..b21a7f06d6a4 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -32,7 +32,6 @@ * * @faddr - Saved first hop address * @nexthop - Saved nexthop address in LSRR and SSRR - * @is_data - Options in __data, rather than skb * @is_strictroute - Strict source route * @srr_is_hit - Packet destination addr was our one * @is_changed - IP checksum more not valid -- cgit v1.2.3 From 274038f8c94c493e2977983e2aecb5f5f0778479 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Tue, 11 Jun 2013 14:41:24 +0400 Subject: tun: Report "persist" flag to userspace The TUN_PERSIST flag is not reported at all -- both TUNGETIFF, and sysfs "flags" attribute skip one. Knowing whether a device is persistent or not is critical for checkpoint-restore, thus I propose to add the read-only IFF_PERSIST one for this. 
Setting this new IFF_PERSIST is hardly possible, as TUNSETIFF doesn't check for unknown flags being zero and thus there can be trash. Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller --- drivers/net/tun.c | 3 +++ include/uapi/linux/if_tun.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'include') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 89776c592151..b90a73165d30 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1530,6 +1530,9 @@ static int tun_flags(struct tun_struct *tun) if (tun->flags & TUN_TAP_MQ) flags |= IFF_MULTI_QUEUE; + if (tun->flags & TUN_PERSIST) + flags |= IFF_PERSIST; + return flags; } diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index 2835b85fd46d..82334f88967e 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -68,6 +68,8 @@ #define IFF_MULTI_QUEUE 0x0100 #define IFF_ATTACH_QUEUE 0x0200 #define IFF_DETACH_QUEUE 0x0400 +/* read-only flag */ +#define IFF_PERSIST 0x0800 /* Features for GSO (TUNSETOFFLOAD). */ #define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ -- cgit v1.2.3 From d9a90a3105eb6ca89aab74223e6526ab4a5e44b5 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 13 Jun 2013 14:23:35 +0800 Subject: macvtap: slient sparse warnings This patch silents the following sparse warnings: drivers/net/macvtap.c:98:9: warning: incorrect type in assignment (different address spaces) drivers/net/macvtap.c:98:9: expected struct macvtap_queue * drivers/net/macvtap.c:98:9: got struct macvtap_queue [noderef] * drivers/net/macvtap.c:120:9: warning: incorrect type in assignment (different address spaces) drivers/net/macvtap.c:120:9: expected struct macvtap_queue * drivers/net/macvtap.c:120:9: got struct macvtap_queue [noderef] * drivers/net/macvtap.c:151:22: error: incompatible types in comparison expression (different address spaces) drivers/net/macvtap.c:233:23: error: incompatible types in comparison expression (different address spaces) drivers/net/macvtap.c:243:23: error: incompatible types in comparison expression (different address spaces) drivers/net/macvtap.c:247:15: error: incompatible types in comparison expression (different address spaces) CC [M] drivers/net/macvtap.o drivers/net/macvlan.c:232:24: error: incompatible types in comparison expression (different address spaces) Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/macvtap.c | 2 +- include/linux/if_macvlan.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 992151ce08a4..edcbf1c0d6ed 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -429,7 +429,7 @@ static int macvtap_open(struct inode *inode, struct file *file) if (!q) goto out; - q->sock.wq = &q->wq; + RCU_INIT_POINTER(q->sock.wq, &q->wq); init_waitqueue_head(&q->wq.wait); q->sock.type = SOCK_RAW; q->sock.state = SS_CONNECTED; diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 19121331a5ed..f49a9f66c3d9 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -70,7 +70,7 @@ struct macvlan_dev { int (*receive)(struct sk_buff *skb); int (*forward)(struct net_device *dev, struct sk_buff *skb); /* This array tracks active taps. 
*/ - struct macvtap_queue *taps[MAX_MACVTAP_QUEUES]; + struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES]; /* This list tracks all taps (both enabled and disabled) */ struct list_head queue_list; int numvtaps; -- cgit v1.2.3 From 85f16525a2eb66e6092cbd8dcf42371df8334ed0 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Tue, 11 Jun 2013 15:35:32 -0700 Subject: tcp: properly send new data in fast recovery in first RTT Linux sends new unset data during disorder and recovery state if all (suspected) lost packets have been retransmitted ( RFC5681, section 3.2 step 1 & 2, RFC3517 section 4, NexSeg() Rule 2). One requirement is to keep the receive window about twice the estimated sender's congestion window (tcp_rcv_space_adjust()), assuming the fast retransmits repair the losses in the next round trip. But currently it's not the case on the first round trip in either normal or Fast Open connection, beucase the initial receive window is identical to (expected) sender's initial congestion window. The fix is to double it. Signed-off-by: Yuchung Cheng Acked-by: Neal Cardwell Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 5 ++--- net/ipv4/tcp_input.c | 13 ++----------- net/ipv4/tcp_output.c | 33 ++++++++++++++++++--------------- 3 files changed, 22 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 0d637e9403a5..6fa80831dc40 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -61,9 +61,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); */ #define MAX_TCP_WINDOW 32767U -/* Offer an initial receive window of 10 mss. */ -#define TCP_DEFAULT_INIT_RCVWND 10 - /* Minimal accepted MSS. It is (60+60+8) - (20+20). */ #define TCP_MIN_MSS 88U @@ -1047,6 +1044,8 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) rx_opt->num_sacks = 0; } +extern u32 tcp_default_init_rwnd(u32 mss); + /* Determine a window scaling and initial window to offer. */ extern void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, __u32 *window_clamp, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 907311c9a012..46271cdcf088 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -347,22 +347,13 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) } /* 3. Tuning rcvbuf, when connection enters established state. */ - static void tcp_fixup_rcvbuf(struct sock *sk) { u32 mss = tcp_sk(sk)->advmss; - u32 icwnd = TCP_DEFAULT_INIT_RCVWND; int rcvmem; - /* Limit to 10 segments if mss <= 1460, - * or 14600/mss segments, with a minimum of two segments. - */ - if (mss > 1460) - icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2); - - rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER); - - rcvmem *= icwnd; + rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) * + tcp_default_init_rwnd(mss); if (sk->sk_rcvbuf < rcvmem) sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index ec335fabd5cc..3dd46eab3b05 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -181,6 +181,21 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); } + +u32 tcp_default_init_rwnd(u32 mss) +{ + /* Initial receive window should be twice of TCP_INIT_CWND to + * enable proper sending of new unset data during fast recovery + * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a + * limit when mss is larger than 1460. 
+ */ + u32 init_rwnd = TCP_INIT_CWND * 2; + + if (mss > 1460) + init_rwnd = max((1460 * init_rwnd) / mss, 2U); + return init_rwnd; +} + /* Determine a window scaling and initial window to offer. * Based on the assumption that the given amount of space * will be offered. Store the results in the tp structure. @@ -230,22 +245,10 @@ void tcp_select_initial_window(int __space, __u32 mss, } } - /* Set initial window to a value enough for senders starting with - * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place - * a limit on the initial window when mss is larger than 1460. - */ if (mss > (1 << *rcv_wscale)) { - int init_cwnd = TCP_DEFAULT_INIT_RCVWND; - if (mss > 1460) - init_cwnd = - max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2); - /* when initializing use the value from init_rcv_wnd - * rather than the default from above - */ - if (init_rcv_wnd) - *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); - else - *rcv_wnd = min(*rcv_wnd, init_cwnd * mss); + if (!init_rcv_wnd) /* Use default unless specified otherwise */ + init_rcv_wnd = tcp_default_init_rwnd(mss); + *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); } /* Set the clamp no higher than max representable value */ -- cgit v1.2.3 From 817cee767523769cbc5ac94e439cde0c21752cbc Mon Sep 17 00:00:00 2001 From: Alexander Bondar Date: Sun, 19 May 2013 14:23:57 +0300 Subject: mac80211: track AP's beacon rate and give it to the driver Track the AP's beacon rate in the scan BSS data and in the interface configuration to let the drivers know which rate the AP is using. This information may be used by drivers, in our case to let the firmware optimise beacon RX. Signed-off-by: Alexander Bondar Signed-off-by: Johannes Berg --- include/net/mac80211.h | 2 ++ net/mac80211/ieee80211_i.h | 1 + net/mac80211/mlme.c | 8 +++++++- net/mac80211/scan.c | 9 +++++++++ 4 files changed, 19 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a405a7a9775c..5b7a3dadadde 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -305,6 +305,7 @@ enum ieee80211_rssi_event { * @basic_rates: bitmap of basic rates, each bit stands for an * index into the rate table configured by the driver in * the current band. + * @beacon_rate: associated AP's beacon TX rate * @mcast_rate: per-band multicast rate index + 1 (0: disabled) * @bssid: The BSSID for this BSS * @enable_beacon: whether beaconing should be enabled or not @@ -352,6 +353,7 @@ struct ieee80211_bss_conf { u32 sync_device_ts; u8 sync_dtim_count; u32 basic_rates; + struct ieee80211_rate *beacon_rate; int mcast_rate[IEEE80211_NUM_BANDS]; u16 ht_operation_mode; s32 cqm_rssi_thold; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 7a6f1a0207ec..a4dfb0be53d7 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -94,6 +94,7 @@ struct ieee80211_bss { #define IEEE80211_MAX_SUPP_RATES 32 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; size_t supp_rates_len; + struct ieee80211_rate *beacon_rate; /* * During association, we save an ERP value from a probe response so diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ad9bb9e10cbb..87f2d4df31f8 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1779,8 +1779,10 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, * probably just won't work at all. 
*/ bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1; + bss_conf->beacon_rate = bss->beacon_rate; bss_info_changed |= BSS_CHANGED_BEACON_INFO; } else { + bss_conf->beacon_rate = NULL; bss_conf->dtim_period = 0; } @@ -1903,6 +1905,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, del_timer_sync(&sdata->u.mgd.chswitch_timer); sdata->vif.bss_conf.dtim_period = 0; + sdata->vif.bss_conf.beacon_rate = NULL; + ifmgd->have_beacon = false; ifmgd->flags = 0; @@ -2754,8 +2758,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, channel); - if (bss) + if (bss) { ieee80211_rx_bss_put(local, bss); + sdata->vif.bss_conf.beacon_rate = bss->beacon_rate; + } if (!sdata->u.mgd.associated || !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 99b103921a4b..1b122a79b0d8 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -140,6 +140,15 @@ ieee80211_bss_info_update(struct ieee80211_local *local, bss->valid_data |= IEEE80211_BSS_VALID_WMM; } + if (beacon) { + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[rx_status->band]; + if (!(rx_status->flag & RX_FLAG_HT) && + !(rx_status->flag & RX_FLAG_VHT)) + bss->beacon_rate = + &sband->bitrates[rx_status->rate_idx]; + } + return bss; } -- cgit v1.2.3 From 1095e69f47926db6f1350a9d6a38626521580e87 Mon Sep 17 00:00:00 2001 From: Frederic Danis Date: Wed, 22 May 2013 11:36:17 +0200 Subject: NFC: NCI: Fix skb->dev usage skb->dev is used for carrying a net_device pointer and not an nci_dev pointer. Remove usage of skb-dev to carry nci_dev and replace it by parameter in nci_recv_frame(), nci_send_frame() and driver send() functions. NfcWilink driver is also updated to use those functions. 
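For context, a minimal sketch of what a driver send hook looks like under the reworked nci_ops, with the nci_dev now passed explicitly instead of being carried in skb->dev; the foo_* structure, callbacks and transport helper are hypothetical and not part of this patch.

#include <net/nfc/nci_core.h>

static int foo_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
	struct foo_dev *fdev = nci_get_drvdata(ndev);	/* hypothetical private data */

	/* skb->dev is left untouched; it is reserved for net_device pointers */
	return foo_write_frame(fdev, skb->data, skb->len);	/* hypothetical transport write */
}

static struct nci_ops foo_nci_ops = {
	.open	= foo_nci_open,
	.close	= foo_nci_close,
	.send	= foo_nci_send,
};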
Signed-off-by: Frederic Danis Signed-off-by: Samuel Ortiz --- drivers/nfc/nfcwilink.c | 17 +++++------------ include/net/nfc/nci_core.h | 4 ++-- net/nfc/nci/core.c | 17 ++++++----------- net/nfc/nci/data.c | 2 -- 4 files changed, 13 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c index 3b731acbc408..41cf8f70a6ad 100644 --- a/drivers/nfc/nfcwilink.c +++ b/drivers/nfc/nfcwilink.c @@ -109,7 +109,7 @@ enum { NFCWILINK_FW_DOWNLOAD, }; -static int nfcwilink_send(struct sk_buff *skb); +static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb); static inline struct sk_buff *nfcwilink_skb_alloc(unsigned int len, gfp_t how) { @@ -156,8 +156,6 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name) return -ENOMEM; } - skb->dev = (void *)drv->ndev; - cmd = (struct nci_vs_nfcc_info_cmd *) skb_put(skb, sizeof(struct nci_vs_nfcc_info_cmd)); cmd->gid = NCI_VS_NFCC_INFO_CMD_GID; @@ -166,7 +164,7 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name) drv->nfcc_info.plen = 0; - rc = nfcwilink_send(skb); + rc = nfcwilink_send(drv->ndev, skb); if (rc) return rc; @@ -232,11 +230,9 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len) return -ENOMEM; } - skb->dev = (void *)drv->ndev; - memcpy(skb_put(skb, len), data, len); - rc = nfcwilink_send(skb); + rc = nfcwilink_send(drv->ndev, skb); if (rc) return rc; @@ -371,10 +367,8 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb) return 0; } - skb->dev = (void *) drv->ndev; - /* Forward skb to NCI core layer */ - rc = nci_recv_frame(skb); + rc = nci_recv_frame(drv->ndev, skb); if (rc < 0) { nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc); return rc; @@ -480,9 +474,8 @@ static int nfcwilink_close(struct nci_dev *ndev) return rc; } -static int nfcwilink_send(struct sk_buff *skb) +static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb) { - struct nci_dev *ndev = (struct nci_dev *)skb->dev; struct nfcwilink *drv = nci_get_drvdata(ndev); struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000}; long len; diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 5bc0c460edc0..1009d3dcb316 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -66,7 +66,7 @@ struct nci_dev; struct nci_ops { int (*open)(struct nci_dev *ndev); int (*close)(struct nci_dev *ndev); - int (*send)(struct sk_buff *skb); + int (*send)(struct nci_dev *ndev, struct sk_buff *skb); }; #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 @@ -153,7 +153,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops, void nci_free_device(struct nci_dev *ndev); int nci_register_device(struct nci_dev *ndev); void nci_unregister_device(struct nci_dev *ndev); -int nci_recv_frame(struct sk_buff *skb); +int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev, unsigned int len, diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 48ada0ec749e..8e0dbbeee9e3 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -797,12 +797,11 @@ EXPORT_SYMBOL(nci_unregister_device); /** * nci_recv_frame - receive frame from NCI drivers * + * @ndev: The nci device * @skb: The sk_buff to receive */ -int nci_recv_frame(struct sk_buff *skb) +int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb) { - struct nci_dev *ndev = (struct nci_dev *) skb->dev; - pr_debug("len %d\n", skb->len); if (!ndev || 
(!test_bit(NCI_UP, &ndev->flags) && @@ -819,10 +818,8 @@ int nci_recv_frame(struct sk_buff *skb) } EXPORT_SYMBOL(nci_recv_frame); -static int nci_send_frame(struct sk_buff *skb) +static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb) { - struct nci_dev *ndev = (struct nci_dev *) skb->dev; - pr_debug("len %d\n", skb->len); if (!ndev) { @@ -833,7 +830,7 @@ static int nci_send_frame(struct sk_buff *skb) /* Get rid of skb owner, prior to sending to the driver. */ skb_orphan(skb); - return ndev->ops->send(skb); + return ndev->ops->send(ndev, skb); } /* Send NCI command */ @@ -861,8 +858,6 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload) if (plen) memcpy(skb_put(skb, plen), payload, plen); - skb->dev = (void *) ndev; - skb_queue_tail(&ndev->cmd_q, skb); queue_work(ndev->cmd_wq, &ndev->cmd_work); @@ -894,7 +889,7 @@ static void nci_tx_work(struct work_struct *work) nci_conn_id(skb->data), nci_plen(skb->data)); - nci_send_frame(skb); + nci_send_frame(ndev, skb); mod_timer(&ndev->data_timer, jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT)); @@ -963,7 +958,7 @@ static void nci_cmd_work(struct work_struct *work) nci_opcode_oid(nci_opcode(skb->data)), nci_plen(skb->data)); - nci_send_frame(skb); + nci_send_frame(ndev, skb); mod_timer(&ndev->cmd_timer, jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT)); diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c index 76c48c5324f8..2a9399dd6c68 100644 --- a/net/nfc/nci/data.c +++ b/net/nfc/nci/data.c @@ -80,8 +80,6 @@ static inline void nci_push_data_hdr(struct nci_dev *ndev, nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT); nci_pbf_set((__u8 *)hdr, pbf); - - skb->dev = (void *) ndev; } static int nci_queue_tx_data_frags(struct nci_dev *ndev, -- cgit v1.2.3 From 9674da8759df0d6c0d24e1ede6e2a1acdef91e3c Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Mon, 29 Apr 2013 17:13:27 +0200 Subject: NFC: Add firmware upload netlink command As several NFC chipsets can have their firmwares upgraded and reflashed, this patchset adds a new netlink command to trigger that the driver loads or flashes a new firmware. This will allows userspace triggered firmware upgrade through netlink. The firmware name or hint is passed as a parameter, and the driver will eventually fetch the firmware binary through the request_firmware API. The cmd can only be executed when the nfc dev is not in use. Actual firmware loading/flashing is an asynchronous operation. Result of the operation shall send a new event up to user space through the nfc dev multicast socket. During operation, the nfc dev is not openable and thus not usable. 
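To illustrate the driver side, a hedged sketch of an fw_upload handler built on request_firmware(); it is a synchronous simplification (real flashing may complete asynchronously), and the foo_* names plus the fdev->dev device pointer are assumptions, not part of this patch.

#include <linux/firmware.h>
#include <net/nfc/nfc.h>

static int foo_fw_upload(struct nfc_dev *nfc_dev, const char *firmware_name)
{
	struct foo_dev *fdev = nfc_get_drvdata(nfc_dev);	/* hypothetical private data */
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, firmware_name, fdev->dev);
	if (rc)
		return rc;

	rc = foo_flash(fdev, fw->data, fw->size);	/* hypothetical flasher */
	release_firmware(fw);
	if (rc)
		return rc;	/* the core clears fw_upload_in_progress on error */

	/* Report completion so the NFC_CMD_FW_UPLOAD event reaches userspace */
	return nfc_fw_upload_done(nfc_dev, firmware_name);
}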
Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz --- include/net/nfc/nfc.h | 2 ++ include/uapi/linux/nfc.h | 6 +++++ net/nfc/core.c | 46 +++++++++++++++++++++++++++++++++++ net/nfc/netlink.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++ net/nfc/nfc.h | 5 ++++ 5 files changed, 122 insertions(+) (limited to 'include') diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 5eb80bb3cbb2..3563dbdcaaf2 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -70,6 +70,7 @@ struct nfc_ops { int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target); int (*enable_se)(struct nfc_dev *dev, u32 secure_element); int (*disable_se)(struct nfc_dev *dev, u32 secure_element); + int (*fw_upload)(struct nfc_dev *dev, const char *firmware_name); }; #define NFC_TARGET_IDX_ANY -1 @@ -104,6 +105,7 @@ struct nfc_dev { int targets_generation; struct device dev; bool dev_up; + bool fw_upload_in_progress; u8 rf_mode; bool polling; struct nfc_target *active_target; diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 7c6f627a717d..b6cbd164f146 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -69,6 +69,8 @@ * starting a poll from a device which has a secure element enabled means * we want to do SE based card emulation. * @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element. + * @NFC_CMD_FW_UPLOAD: Request to Load/flash firmware, or event to inform that + * some firmware was loaded */ enum nfc_commands { NFC_CMD_UNSPEC, @@ -92,6 +94,7 @@ enum nfc_commands { NFC_CMD_DISABLE_SE, NFC_CMD_LLC_SDREQ, NFC_EVENT_LLC_SDRES, + NFC_CMD_FW_UPLOAD, /* private: internal use only */ __NFC_CMD_AFTER_LAST }; @@ -121,6 +124,7 @@ enum nfc_commands { * @NFC_ATTR_LLC_PARAM_RW: Receive Window size parameter * @NFC_ATTR_LLC_PARAM_MIUX: MIU eXtension parameter * @NFC_ATTR_SE: Available Secure Elements + * @NFC_ATTR_FIRMWARE_NAME: Free format firmware version */ enum nfc_attrs { NFC_ATTR_UNSPEC, @@ -143,6 +147,7 @@ enum nfc_attrs { NFC_ATTR_LLC_PARAM_MIUX, NFC_ATTR_SE, NFC_ATTR_LLC_SDP, + NFC_ATTR_FIRMWARE_NAME, /* private: internal use only */ __NFC_ATTR_AFTER_LAST }; @@ -162,6 +167,7 @@ enum nfc_sdp_attr { #define NFC_SENSB_RES_MAXSIZE 12 #define NFC_SENSF_RES_MAXSIZE 18 #define NFC_GB_MAXSIZE 48 +#define NFC_FIRMWARE_NAME_MAXSIZE 32 /* NFC protocols */ #define NFC_PROTO_JEWEL 1 diff --git a/net/nfc/core.c b/net/nfc/core.c index 40d2527693da..eb3cecf1764e 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -44,6 +44,47 @@ DEFINE_MUTEX(nfc_devlist_mutex); /* NFC device ID bitmap */ static DEFINE_IDA(nfc_index_ida); +int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name) +{ + int rc = 0; + + pr_debug("%s do firmware %s\n", dev_name(&dev->dev), firmware_name); + + device_lock(&dev->dev); + + if (!device_is_registered(&dev->dev)) { + rc = -ENODEV; + goto error; + } + + if (dev->dev_up) { + rc = -EBUSY; + goto error; + } + + if (!dev->ops->fw_upload) { + rc = -EOPNOTSUPP; + goto error; + } + + dev->fw_upload_in_progress = true; + rc = dev->ops->fw_upload(dev, firmware_name); + if (rc) + dev->fw_upload_in_progress = false; + +error: + device_unlock(&dev->dev); + return rc; +} + +int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name) +{ + dev->fw_upload_in_progress = false; + + return nfc_genl_fw_upload_done(dev, firmware_name); +} +EXPORT_SYMBOL(nfc_fw_upload_done); + /** * nfc_dev_up - turn on the NFC device * @@ -69,6 +110,11 @@ int nfc_dev_up(struct nfc_dev *dev) goto error; } + if 
(dev->fw_upload_in_progress) { + rc = -EBUSY; + goto error; + } + if (dev->dev_up) { rc = -EALREADY; goto error; diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index f0c4d61f37c0..1deadad9a285 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -56,6 +56,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { [NFC_ATTR_LLC_PARAM_RW] = { .type = NLA_U8 }, [NFC_ATTR_LLC_PARAM_MIUX] = { .type = NLA_U16 }, [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED }, + [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING, + .len = NFC_FIRMWARE_NAME_MAXSIZE }, }; static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { @@ -1025,6 +1027,62 @@ exit: return rc; } +static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info) +{ + struct nfc_dev *dev; + int rc; + u32 idx; + char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1]; + + if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + return -EINVAL; + + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); + + dev = nfc_get_device(idx); + if (!dev) + return -ENODEV; + + nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME], + sizeof(firmware_name)); + + rc = nfc_fw_upload(dev, firmware_name); + + nfc_put_device(dev); + return rc; +} + +int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, + NFC_CMD_FW_UPLOAD); + if (!hdr) + goto free_msg; + + if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) || + nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); +free_msg: + nlmsg_free(msg); + return -EMSGSIZE; +} + static struct genl_ops nfc_genl_ops[] = { { .cmd = NFC_CMD_GET_DEVICE, @@ -1084,6 +1142,11 @@ static struct genl_ops nfc_genl_ops[] = { .doit = nfc_genl_llc_sdreq, .policy = nfc_genl_policy, }, + { + .cmd = NFC_CMD_FW_UPLOAD, + .doit = nfc_genl_fw_upload, + .policy = nfc_genl_policy, + }, }; diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index afa1f84ba040..cf0c48165996 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ -120,6 +120,11 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter) class_dev_iter_exit(iter); } +int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name); +int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name); + +int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name); + int nfc_dev_up(struct nfc_dev *dev); int nfc_dev_down(struct nfc_dev *dev); -- cgit v1.2.3 From 9a695d23aab889273821c91b4132f1ed315b251b Mon Sep 17 00:00:00 2001 From: Eric Lapuyade Date: Mon, 29 Apr 2013 17:47:42 +0200 Subject: NFC: HCI: Implement fw_upload ops This is a simple forward to the HCI driver. When driver is done with the operation, it shall directly notify NFC Core by calling nfc_fw_upload_done(). 
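A driver sitting on the HCI core then only needs to populate the new callback in its nfc_hci_ops; leaving it NULL keeps the -ENOTSUPP path shown below. A sketch with hypothetical foo_* names, assuming the usual HCI client-data helper:

#include <net/nfc/hci.h>

static int foo_hci_fw_upload(struct nfc_hci_dev *hdev, const char *firmware_name)
{
	struct foo_info *info = nfc_hci_get_clientdata(hdev);	/* hypothetical private data */

	return foo_do_flash(info, firmware_name);	/* hypothetical flasher */
}

/* wired up in the driver as:  .fw_upload = foo_hci_fw_upload,  */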
Signed-off-by: Eric Lapuyade Signed-off-by: Samuel Ortiz --- include/net/nfc/hci.h | 1 + net/nfc/hci/core.c | 11 +++++++++++ 2 files changed, 12 insertions(+) (limited to 'include') diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index b87a1692b086..14faf2dc7a48 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -61,6 +61,7 @@ struct nfc_hci_ops { struct sk_buff *skb); int (*enable_se)(struct nfc_dev *dev, u32 secure_element); int (*disable_se)(struct nfc_dev *dev, u32 secure_element); + int (*fw_upload)(struct nfc_hci_dev *hdev, const char *firmware_name); }; /* Pipes */ diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index 91020b210d87..b7e4dac5654e 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -779,6 +779,16 @@ static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb) } } +static int hci_fw_upload(struct nfc_dev *nfc_dev, const char *firmware_name) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hdev->ops->fw_upload) + return hdev->ops->fw_upload(hdev, firmware_name); + + return -ENOTSUPP; +} + static struct nfc_ops hci_nfc_ops = { .dev_up = hci_dev_up, .dev_down = hci_dev_down, @@ -791,6 +801,7 @@ static struct nfc_ops hci_nfc_ops = { .im_transceive = hci_transceive, .tm_send = hci_tm_send, .check_presence = hci_check_presence, + .fw_upload = hci_fw_upload, }; struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, -- cgit v1.2.3 From 5f121b9a83b499a61ed44e5ba619c7de8f7271ad Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 13 Jun 2013 15:29:38 -0400 Subject: net-rps: fixes for rps flow limit Caught by sparse: - __rcu: missing annotation to sd->flow_limit - __user: direct access in cpumask_scnprintf Also - add endline character when printing bitmap if room in buffer - avoid bucket overflow by reducing FLOW_LIMIT_HISTORY The last item warrants some explanation. The hashtable buckets are subject to overflow if FLOW_LIMIT_HISTORY is larger than or equal to bucket size, since all packets may end up in a single bucket. The current (rather arbitrary) history value of 256 happens to match the buffer size (u8). As a result, with a single flow, the first 128 packets are accepted (correct), the second 128 packets dropped (correct) and then the history[] array has filled, so that each subsequent new packet causes an increment in the bucket for new_flow plus a decrement for old_flow: a steady state. This is fine if packets are dropped, as the steady state goes away as soon as a mix of traffic reappears. But, because the 256th packet overflowed the bucket to 0: no packets are dropped. Instead of explicitly adding an overflow check, this patch changes FLOW_LIMIT_HISTORY to never be able to overflow a single bucket. Reported-by: Fengguang Wu (first item) Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 4 ++-- net/core/sysctl_net_core.c | 19 ++++++++++++++++--- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e5d65573b4d6..8c9fcc42502a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1840,7 +1840,7 @@ static inline int unregister_gifconf(unsigned int family) } #ifdef CONFIG_NET_FLOW_LIMIT -#define FLOW_LIMIT_HISTORY (1 << 8) /* must be ^2 */ +#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ struct sd_flow_limit { u64 count; unsigned int num_buckets; @@ -1883,7 +1883,7 @@ struct softnet_data { struct napi_struct backlog; #ifdef CONFIG_NET_FLOW_LIMIT - struct sd_flow_limit *flow_limit; + struct sd_flow_limit __rcu *flow_limit; #endif }; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 637a42e5d589..78c746e016ae 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -132,6 +132,8 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write, write_unlock: mutex_unlock(&flow_limit_update_mutex); } else { + char kbuf[128]; + if (*ppos || !*lenp) { *lenp = 0; goto done; @@ -146,9 +148,20 @@ write_unlock: } rcu_read_unlock(); - len = cpumask_scnprintf(buffer, *lenp, mask); - *lenp = len + 1; - *ppos += len + 1; + len = min(sizeof(kbuf) - 1, *lenp); + len = cpumask_scnprintf(kbuf, len, mask); + if (!len) { + *lenp = 0; + goto done; + } + if (len < *lenp) + kbuf[len++] = '\n'; + if (copy_to_user(buffer, kbuf, len)) { + ret = -EFAULT; + goto done; + } + *lenp = len; + *ppos += len; } done: -- cgit v1.2.3 From 1d8faf48c74b8329a0322dc4b2a2030ae5003c86 Mon Sep 17 00:00:00 2001 From: Rony Efraim Date: Thu, 13 Jun 2013 13:19:10 +0300 Subject: net/core: Add VF link state control Add netlink directives and ndo entry to allow for controling VF link, which can be in one of three states: Auto - VF link state reflects the PF link state (default) Up - VF link state is up, traffic from VF to VF works even if the actual PF link is down Down - VF link state is down, no traffic from/to this VF, can be of use while configuring the VF Signed-off-by: Rony Efraim Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- include/linux/if_link.h | 1 + include/linux/netdevice.h | 3 +++ include/uapi/linux/if_link.h | 13 +++++++++++++ net/core/rtnetlink.c | 22 ++++++++++++++++++++-- 4 files changed, 37 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/if_link.h b/include/linux/if_link.h index c3f817c3eb45..a86784dec3d3 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -12,5 +12,6 @@ struct ifla_vf_info { __u32 qos; __u32 tx_rate; __u32 spoofchk; + __u32 linkstate; }; #endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8c9fcc42502a..09b4188c1ea7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -829,6 +829,7 @@ struct netdev_fcoe_hbainfo { * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_config)(struct net_device *dev, * int vf, struct ifla_vf_info *ivf); + * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); * int (*ndo_set_vf_port)(struct net_device *dev, int vf, * struct nlattr *port[]); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); @@ -986,6 +987,8 @@ struct net_device_ops { int (*ndo_get_vf_config)(struct net_device *dev, int vf, struct ifla_vf_info *ivf); + int (*ndo_set_vf_link_state)(struct net_device *dev, + int vf, int link_state); int (*ndo_set_vf_port)(struct net_device *dev, int vf, struct nlattr *port[]); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index da05a2698cb5..03f6170ab337 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -338,6 +338,7 @@ enum { IFLA_VF_VLAN, IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ + IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */ __IFLA_VF_MAX, }; @@ -364,6 +365,18 @@ struct ifla_vf_spoofchk { __u32 setting; }; +enum { + IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ + IFLA_VF_LINK_STATE_ENABLE, /* link always up */ + IFLA_VF_LINK_STATE_DISABLE, /* link always down */ + __IFLA_VF_LINK_STATE_MAX, +}; + +struct ifla_vf_link_state { + __u32 vf; + __u32 link_state; +}; + /* VF ports management section * * Nested layout of set/get msg is: diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 49c14451d8ab..9007533867f0 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -947,6 +947,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, struct ifla_vf_vlan vf_vlan; struct ifla_vf_tx_rate vf_tx_rate; struct ifla_vf_spoofchk vf_spoofchk; + struct ifla_vf_link_state vf_linkstate; /* * Not all SR-IOV capable drivers support the @@ -956,18 +957,24 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, */ ivi.spoofchk = -1; memset(ivi.mac, 0, sizeof(ivi.mac)); + /* The default value for VF link state is "auto" + * IFLA_VF_LINK_STATE_AUTO which equals zero + */ + ivi.linkstate = 0; if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) break; vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = - vf_spoofchk.vf = ivi.vf; + vf_spoofchk.vf = + vf_linkstate.vf = ivi.vf; memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); vf_vlan.vlan = ivi.vlan; vf_vlan.qos = ivi.qos; vf_tx_rate.rate = ivi.tx_rate; vf_spoofchk.setting = ivi.spoofchk; + vf_linkstate.link_state = ivi.linkstate; vf = nla_nest_start(skb, IFLA_VF_INFO); if (!vf) { nla_nest_cancel(skb, vfinfo); @@ -978,7 +985,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, nla_put(skb, 
IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate) || nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), - &vf_spoofchk)) + &vf_spoofchk) || + nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate), + &vf_linkstate)) goto nla_put_failure; nla_nest_end(skb, vf); } @@ -1238,6 +1247,15 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) ivs->setting); break; } + case IFLA_VF_LINK_STATE: { + struct ifla_vf_link_state *ivl; + ivl = nla_data(vf); + err = -EOPNOTSUPP; + if (ops->ndo_set_vf_link_state) + err = ops->ndo_set_vf_link_state(dev, ivl->vf, + ivl->link_state); + break; + } default: err = -EINVAL; break; -- cgit v1.2.3 From 948e306d7d645af80ea331b60495710fe4fe12bb Mon Sep 17 00:00:00 2001 From: Rony Efraim Date: Thu, 13 Jun 2013 13:19:11 +0300 Subject: net/mlx4: Add VF link state support Add support to change the link state of VF (vPort) Signed-off-by: Rony Efraim Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 48 ++++++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8 +++++ drivers/net/ethernet/mellanox/mlx4/eq.c | 9 +++-- drivers/net/ethernet/mellanox/mlx4/fw.c | 8 +++++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1 + include/linux/mlx4/cmd.h | 2 +- 6 files changed, 73 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 0e572a527154..ea1e03896353 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -39,6 +39,7 @@ #include #include +#include #include #include @@ -2178,7 +2179,54 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in ivf->qos = s_info->default_qos; ivf->tx_rate = s_info->tx_rate; ivf->spoofchk = s_info->spoofchk; + ivf->linkstate = s_info->link_state; return 0; } EXPORT_SYMBOL_GPL(mlx4_get_vf_config); + +int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + struct mlx4_vport_oper_state *vp_oper; + int slave; + u8 link_stat_event; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + /* get current link state */ + if (!priv->sense.do_sense_port[port]) + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + else + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + case IFLA_VF_LINK_STATE_ENABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + break; + + case IFLA_VF_LINK_STATE_DISABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + default: + mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n", + link_state, slave, port); + return -EINVAL; + }; + /* update the admin & oper state on the link state */ + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + s_info->link_state = link_state; + vp_oper->state.link_state = link_state; + + /* send event */ + mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 89c47ea84b50..ade276cca0e6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2061,6 +2061,13 @@ static int mlx4_en_get_vf_config(struct 
net_device *dev, int vf, struct ifla_vf_ return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); } +static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); +} static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2101,6 +2108,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_set_vf_mac = mlx4_en_set_vf_mac, .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, + .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, .ndo_get_vf_config = mlx4_en_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 6000342f9725..7e042869ef0c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -448,6 +448,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) int i; enum slave_port_gen_event gen_event; unsigned long flags; + struct mlx4_vport_state *s_info; while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { /* @@ -556,7 +557,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" " to slave: %d, port:%d\n", __func__, i, port); - mlx4_slave_event(dev, i, eqe); + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + mlx4_slave_event(dev, i, eqe); } else { /* IB port */ set_and_calc_slave_port_state(dev, i, port, MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, @@ -580,7 +583,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) for (i = 0; i < dev->num_slaves; i++) { if (i == mlx4_master_func_num(dev)) continue; - mlx4_slave_event(dev, i, eqe); + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + mlx4_slave_event(dev, i, eqe); } else /* IB port */ /* port-up event will be sent to a slave when the diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 2c97901c6a6d..569bbe3e7403 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -830,8 +830,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, u8 port_type; u16 short_field; int err; + int admin_link_state; #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 +#define MLX4_PORT_LINK_UP_MASK 0x80 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e @@ -861,6 +863,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, /* set port type to currently operating port type */ port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); + admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state; + if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state) + port_type |= MLX4_PORT_LINK_UP_MASK; + else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state) + port_type &= ~MLX4_PORT_LINK_UP_MASK; + MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index df15bb6631cc..75272935a3f7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ 
b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -482,6 +482,7 @@ struct mlx4_vport_state { u8 default_qos; u32 tx_rate; bool spoofchk; + u32 link_state; }; struct mlx4_vf_admin_state { diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index adf6e0648f20..8074a9711cf1 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -237,7 +237,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); - +int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) -- cgit v1.2.3 From 8a00a61b0ef2bfd1b468dd20c0d0b1a94a8f7475 Mon Sep 17 00:00:00 2001 From: Frederic Danis Date: Wed, 29 May 2013 15:35:02 +0200 Subject: NFC: Add basic NCI over SPI The NFC Forum defines a transport interface based on Serial Peripheral Interface (SPI) for the NFC Controller Interface (NCI). This module implements the SPI transport of NCI, calling SPI module directly to read/write data to NFC controller (NFCC). NFCC driver should provide functions performing device open and close. It should also provide functions asserting/de-asserting interruption to prevent TX/RX race conditions. NFCC driver can also fix a delay between transactions if needed by the hardware. Signed-off-by: Frederic Danis Signed-off-by: Samuel Ortiz --- include/net/nfc/nci_core.h | 49 ++++++++++++++++ net/nfc/nci/Kconfig | 10 ++++ net/nfc/nci/Makefile | 4 +- net/nfc/nci/spi.c | 136 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 198 insertions(+), 1 deletion(-) create mode 100644 net/nfc/nci/spi.c (limited to 'include') diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 1009d3dcb316..3b05bebd8235 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -3,6 +3,7 @@ * NFC Controller (NFCC) and a Device Host (DH). * * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2013 Intel Corporation. All rights reserved. 
* * Written by Ilan Elias * @@ -202,4 +203,52 @@ void nci_req_complete(struct nci_dev *ndev, int result); /* ----- NCI status code ----- */ int nci_to_errno(__u8 code); +/* ----- NCI over SPI acknowledge modes ----- */ +#define NCI_SPI_CRC_DISABLED 0x00 +#define NCI_SPI_CRC_ENABLED 0x01 + +/* ----- NCI SPI structures ----- */ +struct nci_spi_dev; + +struct nci_spi_ops { + int (*open)(struct nci_spi_dev *ndev); + int (*close)(struct nci_spi_dev *ndev); + void (*assert_int)(struct nci_spi_dev *ndev); + void (*deassert_int)(struct nci_spi_dev *ndev); +}; + +struct nci_spi_dev { + struct nci_dev *nci_dev; + struct spi_device *spi; + struct nci_spi_ops *ops; + + unsigned int xfer_udelay; /* microseconds delay between + transactions */ + u8 acknowledge_mode; + + void *driver_data; +}; + +/* ----- NCI SPI Devices ----- */ +struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi, + struct nci_spi_ops *ops, + u32 supported_protocols, + u32 supported_se, + u8 acknowledge_mode, + unsigned int delay); +void nci_spi_free_device(struct nci_spi_dev *ndev); +int nci_spi_register_device(struct nci_spi_dev *ndev); +void nci_spi_unregister_device(struct nci_spi_dev *ndev); + +static inline void nci_spi_set_drvdata(struct nci_spi_dev *ndev, + void *data) +{ + ndev->driver_data = data; +} + +static inline void *nci_spi_get_drvdata(struct nci_spi_dev *ndev) +{ + return ndev->driver_data; +} + #endif /* __NCI_CORE_H */ diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig index 6d69b5f0f19b..2a2416080b4f 100644 --- a/net/nfc/nci/Kconfig +++ b/net/nfc/nci/Kconfig @@ -8,3 +8,13 @@ config NFC_NCI Say Y here to compile NCI support into the kernel or say M to compile it as module (nci). + +config NFC_NCI_SPI + depends on NFC_NCI && SPI + bool "NCI over SPI protocol support" + default n + help + NCI (NFC Controller Interface) is a communication protocol between + an NFC Controller (NFCC) and a Device Host (DH). + + Say yes if you use an NCI driver that requires SPI link layer. diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile index cdb3a2e44471..7aeedc43187d 100644 --- a/net/nfc/nci/Makefile +++ b/net/nfc/nci/Makefile @@ -4,4 +4,6 @@ obj-$(CONFIG_NFC_NCI) += nci.o -nci-objs := core.o data.o lib.o ntf.o rsp.o \ No newline at end of file +nci-objs := core.o data.o lib.o ntf.o rsp.o + +nci-$(CONFIG_NFC_NCI_SPI) += spi.o diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c new file mode 100644 index 000000000000..ebcdba51418c --- /dev/null +++ b/net/nfc/nci/spi.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#define pr_fmt(fmt) "nci_spi: %s: " fmt, __func__ + +#include +#include +#include +#include + +#define NCI_SPI_HDR_LEN 4 +#define NCI_SPI_CRC_LEN 2 + +static int nci_spi_open(struct nci_dev *nci_dev) +{ + struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev); + + return ndev->ops->open(ndev); +} + +static int nci_spi_close(struct nci_dev *nci_dev) +{ + struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev); + + return ndev->ops->close(ndev); +} + +static int nci_spi_send(struct nci_dev *nci_dev, struct sk_buff *skb) +{ + return 0; +} + +static struct nci_ops nci_spi_ops = { + .open = nci_spi_open, + .close = nci_spi_close, + .send = nci_spi_send, +}; + +/* ---- Interface to NCI SPI drivers ---- */ + +/** + * nci_spi_allocate_device - allocate a new nci spi device + * + * @spi: SPI device + * @ops: device operations + * @supported_protocols: NFC protocols supported by the device + * @supported_se: NFC Secure Elements supported by the device + * @acknowledge_mode: Acknowledge mode used by the device + * @delay: delay between transactions in us + */ +struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi, + struct nci_spi_ops *ops, + u32 supported_protocols, + u32 supported_se, + u8 acknowledge_mode, + unsigned int delay) +{ + struct nci_spi_dev *ndev; + int tailroom = 0; + + if (!ops->open || !ops->close || !ops->assert_int || !ops->deassert_int) + return NULL; + + if (!supported_protocols) + return NULL; + + ndev = devm_kzalloc(&spi->dev, sizeof(struct nci_dev), GFP_KERNEL); + if (!ndev) + return NULL; + + ndev->ops = ops; + ndev->acknowledge_mode = acknowledge_mode; + ndev->xfer_udelay = delay; + + if (acknowledge_mode == NCI_SPI_CRC_ENABLED) + tailroom += NCI_SPI_CRC_LEN; + + ndev->nci_dev = nci_allocate_device(&nci_spi_ops, supported_protocols, + supported_se, NCI_SPI_HDR_LEN, + tailroom); + if (!ndev->nci_dev) + return NULL; + + nci_set_drvdata(ndev->nci_dev, ndev); + + return ndev; +} +EXPORT_SYMBOL_GPL(nci_spi_allocate_device); + +/** + * nci_spi_free_device - deallocate nci spi device + * + * @ndev: The nci spi device to deallocate + */ +void nci_spi_free_device(struct nci_spi_dev *ndev) +{ + nci_free_device(ndev->nci_dev); +} +EXPORT_SYMBOL_GPL(nci_spi_free_device); + +/** + * nci_spi_register_device - register a nci spi device in the nfc subsystem + * + * @pdev: The nci spi device to register + */ +int nci_spi_register_device(struct nci_spi_dev *ndev) +{ + return nci_register_device(ndev->nci_dev); +} +EXPORT_SYMBOL_GPL(nci_spi_register_device); + +/** + * nci_spi_unregister_device - unregister a nci spi device in the nfc subsystem + * + * @dev: The nci spi device to unregister + */ +void nci_spi_unregister_device(struct nci_spi_dev *ndev) +{ + nci_unregister_device(ndev->nci_dev); +} +EXPORT_SYMBOL_GPL(nci_spi_unregister_device); -- cgit v1.2.3 From ee9596d467e4d05c77a8c883aeeb5b74d1a3cd31 Mon Sep 17 00:00:00 2001 From: Frederic Danis Date: Wed, 29 May 2013 15:35:03 +0200 Subject: NFC: Add NCI over SPI send Before any operation, driver interruption is de-asserted to prevent race condition between TX and RX. The NCI over SPI header is added in front of NCI packet. If acknowledged mode is set, CRC-16-CCITT is added to the packet. Then the packet is forwarded to SPI module to be sent. A delay after the transaction is added. This delay is determined by the driver during nci_spi_allocate_device() call and can be 0. After data has been sent, driver interruption is re-asserted. If acknowledged mode is set, nci_spi_send will block until acknowledgment is received. 
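To show how a chip driver is expected to consume this layer, a hedged registration sketch combining the allocation API from the previous patch with this send path; the foo_* callbacks, the protocol mask and the 50 microsecond delay are illustrative assumptions only.

#include <linux/spi/spi.h>
#include <net/nfc/nci_core.h>

static struct nci_spi_ops foo_spi_ops = {
	.open		= foo_spi_open,
	.close		= foo_spi_close,
	.assert_int	= foo_assert_irq,
	.deassert_int	= foo_deassert_irq,
};

static int foo_spi_probe(struct spi_device *spi)
{
	struct nci_spi_dev *nsdev;

	nsdev = nci_spi_allocate_device(spi, &foo_spi_ops,
					NFC_PROTO_NFC_DEP_MASK, 0,
					NCI_SPI_CRC_ENABLED, 50 /* us */);
	if (!nsdev)
		return -ENOMEM;

	return nci_spi_register_device(nsdev);
}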
Signed-off-by: Frederic Danis Signed-off-by: Samuel Ortiz --- include/net/nfc/nci_core.h | 3 ++ net/nfc/nci/spi.c | 71 +++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 73 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 3b05bebd8235..36df525d2ab3 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -226,6 +226,9 @@ struct nci_spi_dev { transactions */ u8 acknowledge_mode; + struct completion req_completion; + u8 req_result; + void *driver_data; }; diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c index ebcdba51418c..6258461e6998 100644 --- a/net/nfc/nci/spi.c +++ b/net/nfc/nci/spi.c @@ -20,12 +20,25 @@ #include #include +#include #include #include #define NCI_SPI_HDR_LEN 4 #define NCI_SPI_CRC_LEN 2 +#define NCI_SPI_SEND_TIMEOUT (NCI_CMD_TIMEOUT > NCI_DATA_TIMEOUT ? \ + NCI_CMD_TIMEOUT : NCI_DATA_TIMEOUT) + +#define NCI_SPI_DIRECT_WRITE 0x01 +#define NCI_SPI_DIRECT_READ 0x02 + +#define ACKNOWLEDGE_NONE 0 +#define ACKNOWLEDGE_ACK 1 +#define ACKNOWLEDGE_NACK 2 + +#define CRC_INIT 0xFFFF + static int nci_spi_open(struct nci_dev *nci_dev) { struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev); @@ -40,9 +53,65 @@ static int nci_spi_close(struct nci_dev *nci_dev) return ndev->ops->close(ndev); } +static int __nci_spi_send(struct nci_spi_dev *ndev, struct sk_buff *skb) +{ + struct spi_message m; + struct spi_transfer t; + + t.tx_buf = skb->data; + t.len = skb->len; + t.cs_change = 0; + t.delay_usecs = ndev->xfer_udelay; + + spi_message_init(&m); + spi_message_add_tail(&t, &m); + + return spi_sync(ndev->spi, &m); +} + static int nci_spi_send(struct nci_dev *nci_dev, struct sk_buff *skb) { - return 0; + struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev); + unsigned int payload_len = skb->len; + unsigned char *hdr; + int ret; + long completion_rc; + + ndev->ops->deassert_int(ndev); + + /* add the NCI SPI header to the start of the buffer */ + hdr = skb_push(skb, NCI_SPI_HDR_LEN); + hdr[0] = NCI_SPI_DIRECT_WRITE; + hdr[1] = ndev->acknowledge_mode; + hdr[2] = payload_len >> 8; + hdr[3] = payload_len & 0xFF; + + if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) { + u16 crc; + + crc = crc_ccitt(CRC_INIT, skb->data, skb->len); + *skb_put(skb, 1) = crc >> 8; + *skb_put(skb, 1) = crc & 0xFF; + } + + ret = __nci_spi_send(ndev, skb); + + kfree_skb(skb); + ndev->ops->assert_int(ndev); + + if (ret != 0 || ndev->acknowledge_mode == NCI_SPI_CRC_DISABLED) + goto done; + + init_completion(&ndev->req_completion); + completion_rc = + wait_for_completion_interruptible_timeout(&ndev->req_completion, + NCI_SPI_SEND_TIMEOUT); + + if (completion_rc <= 0 || ndev->req_result == ACKNOWLEDGE_NACK) + ret = -EIO; + +done: + return ret; } static struct nci_ops nci_spi_ops = { -- cgit v1.2.3 From 391d8a2da787257aeaf952c974405b53926e3fb3 Mon Sep 17 00:00:00 2001 From: Frederic Danis Date: Wed, 29 May 2013 15:35:04 +0200 Subject: NFC: Add NCI over SPI receive Before any operation, driver interruption is de-asserted to prevent race condition between TX and RX. Transaction starts by emitting "Direct read" and acknowledged mode bytes. Then packet length is read allowing to allocate correct NCI socket buffer. After that payload is retrieved. A delay after the transaction can be added. This delay is determined by the driver during nci_spi_allocate_device() call and can be 0. 
If acknowledged mode is set: - CRC of header and payload is checked - if frame reception fails (CRC error): NACK is sent - if received frame has ACK or NACK flag: unblock nci_spi_send() Payload is passed to NCI module. At the end, driver interruption is re asserted. Signed-off-by: Frederic Danis Signed-off-by: Samuel Ortiz --- include/net/nfc/nci_core.h | 1 + net/nfc/nci/spi.c | 174 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 175 insertions(+) (limited to 'include') diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 36df525d2ab3..fc1296db237b 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -242,6 +242,7 @@ struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi, void nci_spi_free_device(struct nci_spi_dev *ndev); int nci_spi_register_device(struct nci_spi_dev *ndev); void nci_spi_unregister_device(struct nci_spi_dev *ndev); +int nci_spi_recv_frame(struct nci_spi_dev *ndev); static inline void nci_spi_set_drvdata(struct nci_spi_dev *ndev, void *data) diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c index 6258461e6998..70afc387a965 100644 --- a/net/nfc/nci/spi.c +++ b/net/nfc/nci/spi.c @@ -26,6 +26,8 @@ #define NCI_SPI_HDR_LEN 4 #define NCI_SPI_CRC_LEN 2 +#define NCI_SPI_ACK_SHIFT 6 +#define NCI_SPI_MSB_PAYLOAD_MASK 0x3F #define NCI_SPI_SEND_TIMEOUT (NCI_CMD_TIMEOUT > NCI_DATA_TIMEOUT ? \ NCI_CMD_TIMEOUT : NCI_DATA_TIMEOUT) @@ -203,3 +205,175 @@ void nci_spi_unregister_device(struct nci_spi_dev *ndev) nci_unregister_device(ndev->nci_dev); } EXPORT_SYMBOL_GPL(nci_spi_unregister_device); + +static int send_acknowledge(struct nci_spi_dev *ndev, u8 acknowledge) +{ + struct sk_buff *skb; + unsigned char *hdr; + u16 crc; + int ret; + + skb = nci_skb_alloc(ndev->nci_dev, 0, GFP_KERNEL); + + /* add the NCI SPI header to the start of the buffer */ + hdr = skb_push(skb, NCI_SPI_HDR_LEN); + hdr[0] = NCI_SPI_DIRECT_WRITE; + hdr[1] = NCI_SPI_CRC_ENABLED; + hdr[2] = acknowledge << NCI_SPI_ACK_SHIFT; + hdr[3] = 0; + + crc = crc_ccitt(CRC_INIT, skb->data, skb->len); + *skb_put(skb, 1) = crc >> 8; + *skb_put(skb, 1) = crc & 0xFF; + + ret = __nci_spi_send(ndev, skb); + + kfree_skb(skb); + + return ret; +} + +static struct sk_buff *__nci_spi_recv_frame(struct nci_spi_dev *ndev) +{ + struct sk_buff *skb; + struct spi_message m; + unsigned char req[2], resp_hdr[2]; + struct spi_transfer tx, rx; + unsigned short rx_len = 0; + int ret; + + spi_message_init(&m); + req[0] = NCI_SPI_DIRECT_READ; + req[1] = ndev->acknowledge_mode; + tx.tx_buf = req; + tx.len = 2; + tx.cs_change = 0; + spi_message_add_tail(&tx, &m); + rx.rx_buf = resp_hdr; + rx.len = 2; + rx.cs_change = 1; + spi_message_add_tail(&rx, &m); + ret = spi_sync(ndev->spi, &m); + + if (ret) + return NULL; + + if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) + rx_len = ((resp_hdr[0] & NCI_SPI_MSB_PAYLOAD_MASK) << 8) + + resp_hdr[1] + NCI_SPI_CRC_LEN; + else + rx_len = (resp_hdr[0] << 8) | resp_hdr[1]; + + skb = nci_skb_alloc(ndev->nci_dev, rx_len, GFP_KERNEL); + if (!skb) + return NULL; + + spi_message_init(&m); + rx.rx_buf = skb_put(skb, rx_len); + rx.len = rx_len; + rx.cs_change = 0; + rx.delay_usecs = ndev->xfer_udelay; + spi_message_add_tail(&rx, &m); + ret = spi_sync(ndev->spi, &m); + + if (ret) + goto receive_error; + + if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) { + *skb_push(skb, 1) = resp_hdr[1]; + *skb_push(skb, 1) = resp_hdr[0]; + } + + return skb; + +receive_error: + kfree_skb(skb); + + return NULL; +} + +static int nci_spi_check_crc(struct sk_buff *skb) +{ 
+ u16 crc_data = (skb->data[skb->len - 2] << 8) | + skb->data[skb->len - 1]; + int ret; + + ret = (crc_ccitt(CRC_INIT, skb->data, skb->len - NCI_SPI_CRC_LEN) + == crc_data); + + skb_trim(skb, skb->len - NCI_SPI_CRC_LEN); + + return ret; +} + +static u8 nci_spi_get_ack(struct sk_buff *skb) +{ + u8 ret; + + ret = skb->data[0] >> NCI_SPI_ACK_SHIFT; + + /* Remove NFCC part of the header: ACK, NACK and MSB payload len */ + skb_pull(skb, 2); + + return ret; +} + +/** + * nci_spi_recv_frame - receive frame from NCI SPI drivers + * + * @ndev: The nci spi device + * Context: can sleep + * + * This call may only be used from a context that may sleep. The sleep + * is non-interruptible, and has no timeout. + * + * It returns zero on success, else a negative error code. + */ +int nci_spi_recv_frame(struct nci_spi_dev *ndev) +{ + struct sk_buff *skb; + int ret = 0; + + ndev->ops->deassert_int(ndev); + + /* Retrieve frame from SPI */ + skb = __nci_spi_recv_frame(ndev); + if (!skb) { + ret = -EIO; + goto done; + } + + if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) { + if (!nci_spi_check_crc(skb)) { + send_acknowledge(ndev, ACKNOWLEDGE_NACK); + goto done; + } + + /* In case of acknowledged mode: if ACK or NACK received, + * unblock completion of latest frame sent. + */ + ndev->req_result = nci_spi_get_ack(skb); + if (ndev->req_result) + complete(&ndev->req_completion); + } + + /* If there is no payload (ACK/NACK only frame), + * free the socket buffer + */ + if (skb->len == 0) { + kfree_skb(skb); + goto done; + } + + if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) + send_acknowledge(ndev, ACKNOWLEDGE_ACK); + + /* Forward skb to NCI core layer */ + ret = nci_recv_frame(ndev->nci_dev, skb); + +done: + ndev->ops->assert_int(ndev); + + return ret; +} +EXPORT_SYMBOL_GPL(nci_spi_recv_frame); -- cgit v1.2.3 From 322bce957e9b0e30ef7147dae0414ad8f3f558c8 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Mon, 27 May 2013 15:29:11 +0200 Subject: NFC: pn533: Copy NFCID2 through ATR_REQ When using NFC-F we should copy the NFCID2 buffer that we got from SENSF_RES through the ATR_REQ NFCID3 buffer. Not doing so violates NFC Forum digital requirement #189. 
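As background for the change below, pn533_dep_link_up() flags each optional field of the InJumpForDEP payload in a single byte; the fragment here is only an illustrative sketch of that scheme, with the bit values taken from the driver code that follows and skb/nfcid2/nfcid2_len used as placeholder names:

	/* Sketch, not the driver change itself: reserve the 10-byte NFCID3
	 * slot, fill it with the 8-byte NFCID2 learned from SENSF_RES and
	 * mark the field as present.
	 */
	if (nfcid2_len) {
		memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid2, nfcid2_len);
		*next |= 2;	/* bit 1: NFCID3i supplied */
	}
	/* bit 0 (|= 1) flags passive initiator data, bit 2 (|= 4) flags the
	 * general bytes (Gi), matching the existing driver code.
	 */
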
Signed-off-by: Samuel Ortiz --- drivers/nfc/pn533.c | 14 +++++++++++++- include/net/nfc/nfc.h | 2 ++ include/uapi/linux/nfc.h | 2 ++ 3 files changed, 17 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 6bd4f598b3e1..e196bdfcfc30 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -1235,7 +1235,7 @@ static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data, struct pn533_target_felica { u8 pol_res; u8 opcode; - u8 nfcid2[8]; + u8 nfcid2[NFC_NFCID2_MAXSIZE]; u8 pad[8]; /* optional */ u8 syst_code[]; @@ -1275,6 +1275,9 @@ static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data, memcpy(nfc_tgt->sensf_res, &tgt_felica->opcode, 9); nfc_tgt->sensf_res_len = 9; + memcpy(nfc_tgt->nfcid2, tgt_felica->nfcid2, NFC_NFCID2_MAXSIZE); + nfc_tgt->nfcid2_len = NFC_NFCID2_MAXSIZE; + return 0; } @@ -2084,6 +2087,9 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, if (comm_mode == NFC_COMM_PASSIVE) skb_len += PASSIVE_DATA_LEN; + if (target && target->nfcid2_len) + skb_len += NFC_NFCID3_MAXSIZE; + skb = pn533_alloc_skb(dev, skb_len); if (!skb) return -ENOMEM; @@ -2100,6 +2106,12 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, *next |= 1; } + if (target && target->nfcid2_len) { + memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2, + target->nfcid2_len); + *next |= 2; + } + if (gb != NULL && gb_len > 0) { memcpy(skb_put(skb, gb_len), gb, gb_len); *next |= 4; /* We have some Gi */ diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 3563dbdcaaf2..8fc1784a264d 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -84,6 +84,8 @@ struct nfc_target { u8 sel_res; u8 nfcid1_len; u8 nfcid1[NFC_NFCID1_MAXSIZE]; + u8 nfcid2_len; + u8 nfcid2[NFC_NFCID2_MAXSIZE]; u8 sensb_res_len; u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; u8 sensf_res_len; diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index b6cbd164f146..fb304fb774cc 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -164,6 +164,8 @@ enum nfc_sdp_attr { #define NFC_DEVICE_NAME_MAXSIZE 8 #define NFC_NFCID1_MAXSIZE 10 +#define NFC_NFCID2_MAXSIZE 8 +#define NFC_NFCID3_MAXSIZE 10 #define NFC_SENSB_RES_MAXSIZE 12 #define NFC_SENSF_RES_MAXSIZE 18 #define NFC_GB_MAXSIZE 48 -- cgit v1.2.3 From 0b456c418a5595b9d67f300c9ac6a2441e774603 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Tue, 7 May 2013 19:22:11 +0200 Subject: NFC: Remove the static supported_se field Supported secure elements are typically found during a discovery process initiated when the NFC controller is up and running. For a given NFC chipset there can be many configurations (embedded SE or not, with or without a SIM card wired to the NFC controller SWP interface, etc...) and thus driver code will never know before hand which SEs are available. So we remove this field, it will be replaced by a real SE discovery mechanism. 
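To make the driver-facing impact concrete, here is a rough before/after sketch for a hypothetical HCI driver probe; my_hci_ops and the other identifiers are placeholders, and the argument list mirrors the prototypes touched below:

	/* Before: secure elements were declared statically at allocation. */
	u32 se = NFC_SE_UICC | NFC_SE_EMBEDDED;
	hdev = nfc_hci_allocate_device(&my_hci_ops, &init_data, quirks,
				       protocols, se, llc_name,
				       headroom, tailroom, payload);

	/* After: no SE mask is passed; secure elements get reported later,
	 * once the controller is up and has actually probed for them.
	 */
	hdev = nfc_hci_allocate_device(&my_hci_ops, &init_data, quirks,
				       protocols, llc_name,
				       headroom, tailroom, payload);
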
Signed-off-by: Samuel Ortiz --- drivers/nfc/microread/microread.c | 6 ++---- drivers/nfc/nfcwilink.c | 1 - drivers/nfc/pn533.c | 1 - drivers/nfc/pn544/pn544.c | 6 ++---- include/net/nfc/hci.h | 1 - include/net/nfc/nci_core.h | 1 - include/net/nfc/nfc.h | 2 -- net/nfc/core.c | 2 -- net/nfc/hci/core.c | 3 +-- net/nfc/nci/core.c | 2 -- net/nfc/nci/spi.c | 3 +-- net/nfc/netlink.c | 1 - 12 files changed, 6 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c index 3420d833db17..cdb9f6de132a 100644 --- a/drivers/nfc/microread/microread.c +++ b/drivers/nfc/microread/microread.c @@ -650,7 +650,7 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, { struct microread_info *info; unsigned long quirks = 0; - u32 protocols, se; + u32 protocols; struct nfc_hci_init_data init_data; int r; @@ -678,10 +678,8 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_NFC_DEP_MASK; - se = NFC_SE_UICC | NFC_SE_EMBEDDED; - info->hdev = nfc_hci_allocate_device(µread_hci_ops, &init_data, - quirks, protocols, se, llc_name, + quirks, protocols, llc_name, phy_headroom + MICROREAD_CMDS_HEADROOM, phy_tailroom + diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c index 41cf8f70a6ad..59f95d8fc98c 100644 --- a/drivers/nfc/nfcwilink.c +++ b/drivers/nfc/nfcwilink.c @@ -535,7 +535,6 @@ static int nfcwilink_probe(struct platform_device *pdev) drv->ndev = nci_allocate_device(&nfcwilink_ops, protocols, - NFC_SE_NONE, NFCWILINK_HDR_LEN, 0); if (!drv->ndev) { diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index e196bdfcfc30..a1b46aa7b4d5 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -2791,7 +2791,6 @@ static int pn533_probe(struct usb_interface *interface, dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols, - NFC_SE_NONE, dev->ops->tx_header_len + PN533_CMD_DATAEXCH_HEAD_LEN, dev->ops->tx_tail_len); diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c index 84b5168b603c..0d17da7675b7 100644 --- a/drivers/nfc/pn544/pn544.c +++ b/drivers/nfc/pn544/pn544.c @@ -803,7 +803,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, struct nfc_hci_dev **hdev) { struct pn544_hci_info *info; - u32 protocols, se; + u32 protocols; struct nfc_hci_init_data init_data; int r; @@ -836,10 +836,8 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_NFC_DEP_MASK; - se = NFC_SE_UICC | NFC_SE_EMBEDDED; - info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data, 0, - protocols, se, llc_name, + protocols, llc_name, phy_headroom + PN544_CMDS_HEADROOM, phy_tailroom, phy_payload); if (!info->hdev) { diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 14faf2dc7a48..eca8846a63d6 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -153,7 +153,6 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, struct nfc_hci_init_data *init_data, unsigned long quirks, u32 protocols, - u32 supported_se, const char *llc_name, int tx_headroom, int tx_tailroom, diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index fc1296db237b..99fc1f3a392a 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -148,7 +148,6 @@ struct nci_dev { /* ----- NCI Devices ----- */ struct nci_dev *nci_allocate_device(struct nci_ops *ops, __u32 supported_protocols, - __u32 
supported_se, int tx_headroom, int tx_tailroom); void nci_free_device(struct nci_dev *ndev); diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 8fc1784a264d..9900c0f5d6bd 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -115,7 +115,6 @@ struct nfc_dev { struct nfc_genl_data genl_data; u32 supported_protocols; - u32 supported_se; u32 active_se; int tx_headroom; @@ -136,7 +135,6 @@ extern struct class nfc_class; struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, u32 supported_protocols, - u32 supported_se, int tx_headroom, int tx_tailroom); diff --git a/net/nfc/core.c b/net/nfc/core.c index eb3cecf1764e..334954a1d6e8 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -832,7 +832,6 @@ struct nfc_dev *nfc_get_device(unsigned int idx) */ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, u32 supported_protocols, - u32 supported_se, int tx_headroom, int tx_tailroom) { struct nfc_dev *dev; @@ -850,7 +849,6 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, dev->ops = ops; dev->supported_protocols = supported_protocols; - dev->supported_se = supported_se; dev->active_se = NFC_SE_NONE; dev->tx_headroom = tx_headroom; dev->tx_tailroom = tx_tailroom; diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index d2ef1e2ee0c6..9c8a63d341d3 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -808,7 +808,6 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, struct nfc_hci_init_data *init_data, unsigned long quirks, u32 protocols, - u32 supported_se, const char *llc_name, int tx_headroom, int tx_tailroom, @@ -834,7 +833,7 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, return NULL; } - hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, supported_se, + hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, tx_headroom + HCI_CMDS_HEADROOM, tx_tailroom); if (!hdev->ndev) { diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 8e0dbbeee9e3..145bad15e113 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -658,7 +658,6 @@ static struct nfc_ops nci_nfc_ops = { */ struct nci_dev *nci_allocate_device(struct nci_ops *ops, __u32 supported_protocols, - __u32 supported_se, int tx_headroom, int tx_tailroom) { struct nci_dev *ndev; @@ -681,7 +680,6 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops, ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops, supported_protocols, - supported_se, tx_headroom + NCI_DATA_HDR_SIZE, tx_tailroom); if (!ndev->nfc_dev) diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c index 70afc387a965..c7cf37ba7298 100644 --- a/net/nfc/nci/spi.c +++ b/net/nfc/nci/spi.c @@ -162,8 +162,7 @@ struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi, tailroom += NCI_SPI_CRC_LEN; ndev->nci_dev = nci_allocate_device(&nci_spi_ops, supported_protocols, - supported_se, NCI_SPI_HDR_LEN, - tailroom); + NCI_SPI_HDR_LEN, tailroom); if (!ndev->nci_dev) return NULL; diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 1deadad9a285..fdbc662c564a 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -444,7 +444,6 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || - nla_put_u32(msg, NFC_ATTR_SE, dev->supported_se) || nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) goto nla_put_failure; -- cgit v1.2.3 
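The discovery mechanism that replaces the removed field is added by the following patches in this series (a discover_se hook plus nfc_add_se()); purely as an illustration of where this ends up, a driver-side hook could look roughly like the hypothetical sketch below:

	/* Hypothetical discover_se implementation: report the secure
	 * elements actually found at runtime instead of a static mask.
	 * The indexes and types are made up for the example.
	 */
	static int my_driver_discover_se(struct nfc_hci_dev *hdev)
	{
		nfc_add_se(hdev->ndev, 1, NFC_SE_UICC);		/* wired UICC */
		nfc_add_se(hdev->ndev, 2, NFC_SE_EMBEDDED);	/* embedded SE */

		return 0;
	}
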
From 0a946301c2d3eac8673e556df820c0b6023ac6c3 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Fri, 10 May 2013 11:57:06 +0200 Subject: NFC: Extend and fix the internal secure element API Secure elements need to be discovered after enabling the NFC controller. This is typically done by the NCI core and the HCI drivers (HCI does not specify how to discover SEs, it is left to the specific drivers). Also, the SE enable/disable API explicitely takes a SE index as its argument. Signed-off-by: Samuel Ortiz --- include/net/nfc/hci.h | 5 +++-- include/net/nfc/nfc.h | 7 +++++-- net/nfc/core.c | 7 +++++++ net/nfc/hci/core.c | 33 +++++++++++++++++++++++++++++++++ net/nfc/nci/core.c | 18 ++++++++++++++++++ 5 files changed, 66 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index eca8846a63d6..0af851c3b038 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -59,9 +59,10 @@ struct nfc_hci_ops { struct nfc_target *target); int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event, struct sk_buff *skb); - int (*enable_se)(struct nfc_dev *dev, u32 secure_element); - int (*disable_se)(struct nfc_dev *dev, u32 secure_element); int (*fw_upload)(struct nfc_hci_dev *hdev, const char *firmware_name); + int (*discover_se)(struct nfc_hci_dev *dev); + int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx); + int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx); }; /* Pipes */ diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 9900c0f5d6bd..5187ec70b66a 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -68,9 +68,12 @@ struct nfc_ops { void *cb_context); int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb); int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target); - int (*enable_se)(struct nfc_dev *dev, u32 secure_element); - int (*disable_se)(struct nfc_dev *dev, u32 secure_element); int (*fw_upload)(struct nfc_dev *dev, const char *firmware_name); + + /* Secure Element API */ + int (*discover_se)(struct nfc_dev *dev); + int (*enable_se)(struct nfc_dev *dev, u32 se_idx); + int (*disable_se)(struct nfc_dev *dev, u32 se_idx); }; #define NFC_TARGET_IDX_ANY -1 diff --git a/net/nfc/core.c b/net/nfc/core.c index 334954a1d6e8..a43a56d7f4be 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -126,6 +126,13 @@ int nfc_dev_up(struct nfc_dev *dev) if (!rc) dev->dev_up = true; + /* We have to enable the device before discovering SEs */ + if (dev->ops->discover_se) { + rc = dev->ops->discover_se(dev); + if (!rc) + pr_warn("SE discovery failed\n"); + } + error: device_unlock(&dev->dev); return rc; diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index 9c8a63d341d3..7b1c186736eb 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -692,6 +692,36 @@ static int hci_check_presence(struct nfc_dev *nfc_dev, return hdev->ops->check_presence(hdev, target); } +static int hci_discover_se(struct nfc_dev *nfc_dev) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hdev->ops->discover_se) + return hdev->ops->discover_se(hdev); + + return 0; +} + +static int hci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hdev->ops->enable_se) + return hdev->ops->enable_se(hdev, se_idx); + + return 0; +} + +static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) +{ + struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); + + if (hdev->ops->disable_se) + return hdev->ops->enable_se(hdev, se_idx); + + return 0; +} + static void 
nfc_hci_failure(struct nfc_hci_dev *hdev, int err) { mutex_lock(&hdev->msg_tx_mutex); @@ -802,6 +832,9 @@ static struct nfc_ops hci_nfc_ops = { .tm_send = hci_tm_send, .check_presence = hci_check_presence, .fw_upload = hci_fw_upload, + .discover_se = hci_discover_se, + .enable_se = hci_enable_se, + .disable_se = hci_disable_se, }; struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 145bad15e113..b943d46a1644 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -636,6 +636,21 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, return rc; } +static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx) +{ + return 0; +} + +static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) +{ + return 0; +} + +static int nci_discover_se(struct nfc_dev *nfc_dev) +{ + return 0; +} + static struct nfc_ops nci_nfc_ops = { .dev_up = nci_dev_up, .dev_down = nci_dev_down, @@ -646,6 +661,9 @@ static struct nfc_ops nci_nfc_ops = { .activate_target = nci_activate_target, .deactivate_target = nci_deactivate_target, .im_transceive = nci_transceive, + .enable_se = nci_enable_se, + .disable_se = nci_disable_se, + .discover_se = nci_discover_se, }; /* ---- Interface to NCI drivers ---- */ -- cgit v1.2.3 From fed7c25ec0d4894edfc36bbe5c5231e52f45483a Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Fri, 10 May 2013 15:28:38 +0200 Subject: NFC: Add secure elements addition and removal API This API will allow NFC drivers to add and remove the secure elements they know about or detect. Typically this should be called (asynchronously or not) from the driver or the host interface stack detect_se hook. Signed-off-by: Samuel Ortiz --- include/net/nfc/nfc.h | 22 +++++++++++++++++++++- include/uapi/linux/nfc.h | 4 +++- net/nfc/core.c | 45 ++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 68 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 5187ec70b66a..0e353f1658bb 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -97,6 +97,23 @@ struct nfc_target { u8 logical_idx; }; +/** + * nfc_se - A structure for NFC accessible secure elements. + * + * @idx: The secure element index. User space will enable or + * disable a secure element by its index. + * @type: The secure element type. It can be SE_UICC or + * SE_EMBEDDED. + * @state: The secure element state, either enabled or disabled. 
+ * + */ +struct nfc_se { + struct list_head list; + u32 idx; + u16 type; + u16 state; +}; + struct nfc_genl_data { u32 poll_req_portid; struct mutex genl_data_mutex; @@ -118,7 +135,7 @@ struct nfc_dev { struct nfc_genl_data genl_data; u32 supported_protocols; - u32 active_se; + struct list_head secure_elements; int tx_headroom; int tx_tailroom; @@ -221,4 +238,7 @@ int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb); void nfc_driver_failure(struct nfc_dev *dev, int err); +int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type); +int nfc_remove_se(struct nfc_dev *dev, u32 se_idx); + #endif /* __NET_NFC_H */ diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index fb304fb774cc..3a57cef0b986 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -199,10 +199,12 @@ enum nfc_sdp_attr { #define NFC_PROTO_ISO14443_B_MASK (1 << NFC_PROTO_ISO14443_B) /* NFC Secure Elements */ -#define NFC_SE_NONE 0x0 #define NFC_SE_UICC 0x1 #define NFC_SE_EMBEDDED 0x2 +#define NFC_SE_DISABLED 0x0 +#define NFC_SE_ENABLED 0x1 + struct sockaddr_nfc { sa_family_t sa_family; __u32 dev_idx; diff --git a/net/nfc/core.c b/net/nfc/core.c index a43a56d7f4be..dacadfbcacea 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -760,6 +760,49 @@ inline void nfc_driver_failure(struct nfc_dev *dev, int err) } EXPORT_SYMBOL(nfc_driver_failure); +int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type) +{ + struct nfc_se *se, *n; + + pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); + + list_for_each_entry_safe(se, n, &dev->secure_elements, list) + if (se->idx == se_idx) + return -EALREADY; + + se = kzalloc(sizeof(struct nfc_se), GFP_KERNEL); + if (!se) + return -ENOMEM; + + se->idx = se_idx; + se->type = type; + se->state = NFC_SE_DISABLED; + INIT_LIST_HEAD(&se->list); + + list_add(&se->list, &dev->secure_elements); + + return 0; +} +EXPORT_SYMBOL(nfc_add_se); + +int nfc_remove_se(struct nfc_dev *dev, u32 se_idx) +{ + struct nfc_se *se, *n; + + pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); + + list_for_each_entry_safe(se, n, &dev->secure_elements, list) + if (se->idx == se_idx) { + list_del(&se->list); + kfree(se); + + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL(nfc_remove_se); + static void nfc_release(struct device *d) { struct nfc_dev *dev = to_nfc_dev(d); @@ -856,9 +899,9 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, dev->ops = ops; dev->supported_protocols = supported_protocols; - dev->active_se = NFC_SE_NONE; dev->tx_headroom = tx_headroom; dev->tx_tailroom = tx_tailroom; + INIT_LIST_HEAD(&dev->secure_elements); nfc_genl_data_init(&dev->genl_data); -- cgit v1.2.3 From 2757c3723c3d2b13e3a8bfaa034826f64e9cca43 Mon Sep 17 00:00:00 2001 From: Samuel Ortiz Date: Fri, 10 May 2013 15:47:37 +0200 Subject: NFC: Send netlink events for secure elements additions and removals When an NFC driver or host controller stack discovers a secure element, it will call nfc_add_se(). In order for userspace applications to use these secure elements, a netlink event will then be sent with the SE index and its type. With that information userspace applications can decide wether or not to enable SEs, through their indexes. 
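As an illustration of the receiving side, a user space handler could extract the index and type from the new event roughly as follows; this is a libnl-style sketch under the assumption of an already configured generic netlink socket, not code from any existing daemon:

	/* Sketch of an NFC_EVENT_SE_ADDED handler: the event carries the
	 * device index, the SE index and the SE type as attributes.
	 */
	static int se_added_handler(struct nl_msg *msg, void *arg)
	{
		struct nlattr *attrs[NFC_ATTR_MAX + 1];

		genlmsg_parse(nlmsg_hdr(msg), 0, attrs, NFC_ATTR_MAX, NULL);

		if (attrs[NFC_ATTR_SE_INDEX] && attrs[NFC_ATTR_SE_TYPE])
			printf("SE %u added, type %u\n",
			       nla_get_u32(attrs[NFC_ATTR_SE_INDEX]),
			       nla_get_u8(attrs[NFC_ATTR_SE_TYPE]));

		return NL_OK;
	}
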
Signed-off-by: Samuel Ortiz --- include/uapi/linux/nfc.h | 6 +++++ net/nfc/core.c | 14 +++++++++++ net/nfc/netlink.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++ net/nfc/nfc.h | 3 +++ 4 files changed, 86 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 3a57cef0b986..caed0f324d5f 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -95,6 +95,8 @@ enum nfc_commands { NFC_CMD_LLC_SDREQ, NFC_EVENT_LLC_SDRES, NFC_CMD_FW_UPLOAD, + NFC_EVENT_SE_ADDED, + NFC_EVENT_SE_REMOVED, /* private: internal use only */ __NFC_CMD_AFTER_LAST }; @@ -125,6 +127,8 @@ enum nfc_commands { * @NFC_ATTR_LLC_PARAM_MIUX: MIU eXtension parameter * @NFC_ATTR_SE: Available Secure Elements * @NFC_ATTR_FIRMWARE_NAME: Free format firmware version + * @NFC_ATTR_SE_INDEX: Secure element index + * @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED) */ enum nfc_attrs { NFC_ATTR_UNSPEC, @@ -148,6 +152,8 @@ enum nfc_attrs { NFC_ATTR_SE, NFC_ATTR_LLC_SDP, NFC_ATTR_FIRMWARE_NAME, + NFC_ATTR_SE_INDEX, + NFC_ATTR_SE_TYPE, /* private: internal use only */ __NFC_ATTR_AFTER_LAST }; diff --git a/net/nfc/core.c b/net/nfc/core.c index dacadfbcacea..bb5f16cfc201 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -763,6 +763,7 @@ EXPORT_SYMBOL(nfc_driver_failure); int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type) { struct nfc_se *se, *n; + int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); @@ -781,6 +782,14 @@ int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type) list_add(&se->list, &dev->secure_elements); + rc = nfc_genl_se_added(dev, se_idx, type); + if (rc < 0) { + list_del(&se->list); + kfree(se); + + return rc; + } + return 0; } EXPORT_SYMBOL(nfc_add_se); @@ -788,11 +797,16 @@ EXPORT_SYMBOL(nfc_add_se); int nfc_remove_se(struct nfc_dev *dev, u32 se_idx) { struct nfc_se *se, *n; + int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); list_for_each_entry_safe(se, n, &dev->secure_elements, list) if (se->idx == se_idx) { + rc = nfc_genl_se_removed(dev, se_idx); + if (rc < 0) + return rc; + list_del(&se->list); kfree(se); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index fdbc662c564a..8a11a3a27e69 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -426,6 +426,69 @@ free_msg: return rc; } +int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, + NFC_EVENT_SE_ADDED); + if (!hdr) + goto free_msg; + + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || + nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || + nla_put_u8(msg, NFC_ATTR_SE_TYPE, type)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); +free_msg: + nlmsg_free(msg); + return -EMSGSIZE; +} + +int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, + NFC_EVENT_SE_REMOVED); + if (!hdr) + goto free_msg; + + if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || + nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); + + return 0; + +nla_put_failure: + 
genlmsg_cancel(msg, hdr); +free_msg: + nlmsg_free(msg); + return -EMSGSIZE; +} + static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, u32 portid, u32 seq, struct netlink_callback *cb, diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index cf0c48165996..a6aeee094aa4 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ -94,6 +94,9 @@ int nfc_genl_tm_deactivated(struct nfc_dev *dev); int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list); +int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type); +int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx); + struct nfc_dev *nfc_get_device(unsigned int idx); static inline void nfc_put_device(struct nfc_dev *dev) -- cgit v1.2.3 From 45bfa52e36ef016b789eca73c7847db3f7b4742d Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Wed, 12 Jun 2013 15:57:10 -0700 Subject: openvswitch: Fix struct comment. Signed-off-by: Pravin B Shelar Signed-off-by: Jesse Gross --- include/uapi/linux/openvswitch.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 405918dd7b3f..424672db7f12 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -192,7 +192,6 @@ enum ovs_vport_type { * optional; if not specified a free port number is automatically selected. * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type * of vport. - * and other attributes are ignored. * * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to * look up the vport to operate on; otherwise dp_idx from the &struct -- cgit v1.2.3 From e570bd0472b10c9f982fdb58eb39c81445cddb5c Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Mon, 17 Jun 2013 19:56:20 +0200 Subject: ssb: add struct for serial flash MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This data allow writing for example MTD driver. Signed-off-by: Rafał Miłecki Signed-off-by: John W. 
Linville --- drivers/ssb/driver_chipcommon_sflash.c | 7 +++++++ include/linux/ssb/ssb_driver_mips.h | 15 +++++++++++++++ 2 files changed, 22 insertions(+) (limited to 'include') diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c index 1b9e770f5328..205f1c499c46 100644 --- a/drivers/ssb/driver_chipcommon_sflash.c +++ b/drivers/ssb/driver_chipcommon_sflash.c @@ -73,6 +73,7 @@ static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode) /* Initialize serial flash access */ int ssb_sflash_init(struct ssb_chipcommon *cc) { + struct ssb_sflash *sflash = &cc->dev->bus->mipscore.sflash; const struct ssb_sflash_tbl_e *e; u32 id, id2; @@ -131,6 +132,12 @@ int ssb_sflash_init(struct ssb_chipcommon *cc) return -ENOTSUPP; } + sflash->window = SSB_FLASH2; + sflash->blocksize = e->blocksize; + sflash->numblocks = e->numblocks; + sflash->size = sflash->blocksize * sflash->numblocks; + sflash->present = true; + pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n", e->name, e->blocksize, e->numblocks); diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h index afe79d40a99e..6535e4718fde 100644 --- a/include/linux/ssb/ssb_driver_mips.h +++ b/include/linux/ssb/ssb_driver_mips.h @@ -20,6 +20,18 @@ struct ssb_pflash { u32 window_size; }; +#ifdef CONFIG_SSB_SFLASH +struct ssb_sflash { + bool present; + u32 window; + u32 blocksize; + u16 numblocks; + u32 size; + + void *priv; +}; +#endif + struct ssb_mipscore { struct ssb_device *dev; @@ -27,6 +39,9 @@ struct ssb_mipscore { struct ssb_serial_port serial_ports[4]; struct ssb_pflash pflash; +#ifdef CONFIG_SSB_SFLASH + struct ssb_sflash sflash; +#endif }; extern void ssb_mipscore_init(struct ssb_mipscore *mcore); -- cgit v1.2.3 From eb6db622825b2028df74f490b8c36887cf3c2f50 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Fri, 14 Jun 2013 16:33:25 +0300 Subject: net: change sysctl_net_ll_poll into an unsigned int There is no reason for sysctl_net_ll_poll to be an unsigned long. Change it into an unsigned int. Fix the proc handler. Signed-off-by: Eliezer Tamir Signed-off-by: David S. 
Miller --- include/net/ll_poll.h | 5 +++-- net/core/sysctl_net_core.c | 4 ++-- net/socket.c | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index bc262f88173f..44e2f707cb9f 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -34,7 +34,7 @@ #ifdef CONFIG_NET_LL_RX_POLL struct napi_struct; -extern unsigned long sysctl_net_ll_poll __read_mostly; +extern unsigned int sysctl_net_ll_poll __read_mostly; /* return values from ndo_ll_poll */ #define LL_FLUSH_FAILED -1 @@ -45,7 +45,8 @@ extern unsigned long sysctl_net_ll_poll __read_mostly; static inline cycles_t ll_end_time(void) { - return TSC_MHZ * ACCESS_ONCE(sysctl_net_ll_poll) + get_cycles(); + return (cycles_t)TSC_MHZ * ACCESS_ONCE(sysctl_net_ll_poll) + + get_cycles(); } static inline bool sk_valid_ll(struct sock *sk) diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 78c746e016ae..62702c2053de 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -302,9 +302,9 @@ static struct ctl_table net_core_table[] = { { .procname = "low_latency_poll", .data = &sysctl_net_ll_poll, - .maxlen = sizeof(unsigned long), + .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_doulongvec_minmax + .proc_handler = proc_dointvec }, #endif #endif /* CONFIG_NET */ diff --git a/net/socket.c b/net/socket.c index 21fd29f63ed2..caaffa14e87e 100644 --- a/net/socket.c +++ b/net/socket.c @@ -107,7 +107,7 @@ #include #ifdef CONFIG_NET_LL_RX_POLL -unsigned long sysctl_net_ll_poll __read_mostly; +unsigned int sysctl_net_ll_poll __read_mostly; EXPORT_SYMBOL_GPL(sysctl_net_ll_poll); #endif -- cgit v1.2.3 From 9a3c71aa802499e0b1db2788ccc75a56c5f00555 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Fri, 14 Jun 2013 16:33:35 +0300 Subject: net: convert low latency sockets to sched_clock() Use sched_clock() instead of get_cycles(). We can use sched_clock() because we don't care much about accuracy. Remove the dependency on X86_TSC Signed-off-by: Eliezer Tamir Signed-off-by: David S. 
Miller --- include/net/ll_poll.h | 33 +++++++++++++++++---------------- net/Kconfig | 1 - 2 files changed, 17 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index 44e2f707cb9f..6930cbd943e2 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -21,10 +21,6 @@ * e1000-devel Mailing List */ -/* - * For now this depends on CONFIG_X86_TSC - */ - #ifndef _LINUX_NET_LL_POLL_H #define _LINUX_NET_LL_POLL_H @@ -40,13 +36,19 @@ extern unsigned int sysctl_net_ll_poll __read_mostly; #define LL_FLUSH_FAILED -1 #define LL_FLUSH_BUSY -2 -/* we don't mind a ~2.5% imprecision */ -#define TSC_MHZ (tsc_khz >> 10) - -static inline cycles_t ll_end_time(void) +/* we can use sched_clock() because we don't care much about precision + * we only care that the average is bounded + */ +static inline u64 ll_end_time(void) { - return (cycles_t)TSC_MHZ * ACCESS_ONCE(sysctl_net_ll_poll) - + get_cycles(); + u64 end_time = ACCESS_ONCE(sysctl_net_ll_poll); + + /* we don't mind a ~2.5% imprecision + * sysctl_net_ll_poll is a u_int so this can't overflow + */ + end_time = (end_time << 10) + sched_clock(); + + return end_time; } static inline bool sk_valid_ll(struct sock *sk) @@ -55,16 +57,15 @@ static inline bool sk_valid_ll(struct sock *sk) !need_resched() && !signal_pending(current); } -static inline bool can_poll_ll(cycles_t end_time) +static inline bool can_poll_ll(u64 end_time) { - return !time_after((unsigned long)get_cycles(), - (unsigned long)end_time); + return !time_after64(sched_clock(), end_time); } static inline bool sk_poll_ll(struct sock *sk, int nonblock) { - cycles_t end_time = ll_end_time(); const struct net_device_ops *ops; + u64 end_time = ll_end_time(); struct napi_struct *napi; int rc = false; @@ -117,7 +118,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) #else /* CONFIG_NET_LL_RX_POLL */ -static inline cycles_t ll_end_time(void) +static inline u64 ll_end_time(void) { return 0; } @@ -140,7 +141,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) { } -static inline bool can_poll_ll(cycles_t end_time) +static inline bool can_poll_ll(u64 end_time) { return false; } diff --git a/net/Kconfig b/net/Kconfig index d6a9ce6e1800..e591668fb38f 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -245,7 +245,6 @@ config NETPRIO_CGROUP config NET_LL_RX_POLL bool "Low Latency Receive Poll" - depends on X86_TSC default n ---help--- Support Low Latency Receive Queue Poll. -- cgit v1.2.3 From dafcc4380deec21d160c31411f33c8813f67f517 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Fri, 14 Jun 2013 16:33:57 +0300 Subject: net: add socket option for low latency polling adds a socket option for low latency polling. This allows overriding the global sysctl value with a per-socket one. Unexport sysctl_net_ll_poll since for now it's not needed in modules. Signed-off-by: Eliezer Tamir Signed-off-by: David S. 
Miller --- arch/alpha/include/uapi/asm/socket.h | 2 ++ arch/avr32/include/uapi/asm/socket.h | 2 ++ arch/cris/include/uapi/asm/socket.h | 2 ++ arch/frv/include/uapi/asm/socket.h | 2 ++ arch/h8300/include/uapi/asm/socket.h | 2 ++ arch/ia64/include/uapi/asm/socket.h | 2 ++ arch/m32r/include/uapi/asm/socket.h | 2 ++ arch/mips/include/uapi/asm/socket.h | 2 ++ arch/mn10300/include/uapi/asm/socket.h | 2 ++ arch/parisc/include/uapi/asm/socket.h | 2 ++ arch/powerpc/include/uapi/asm/socket.h | 2 ++ arch/s390/include/uapi/asm/socket.h | 2 ++ arch/sparc/include/uapi/asm/socket.h | 2 ++ arch/xtensa/include/uapi/asm/socket.h | 2 ++ include/net/ll_poll.h | 12 ++++++------ include/net/sock.h | 2 ++ include/uapi/asm-generic/socket.h | 2 ++ net/core/sock.c | 20 ++++++++++++++++++++ net/socket.c | 1 - 19 files changed, 58 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index eee6ea76bdaf..4885825e498d 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -81,4 +81,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h index 37401f535126..79b61798ebf8 100644 --- a/arch/avr32/include/uapi/asm/socket.h +++ b/arch/avr32/include/uapi/asm/socket.h @@ -74,4 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* __ASM_AVR32_SOCKET_H */ diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h index ba409c9947bc..47b1ec55092d 100644 --- a/arch/cris/include/uapi/asm/socket.h +++ b/arch/cris/include/uapi/asm/socket.h @@ -76,6 +76,8 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h index 31dbb5d8e13d..dbc08520f22c 100644 --- a/arch/frv/include/uapi/asm/socket.h +++ b/arch/frv/include/uapi/asm/socket.h @@ -74,5 +74,7 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h index 5d1c6d0870e6..a38d38a6520b 100644 --- a/arch/h8300/include/uapi/asm/socket.h +++ b/arch/h8300/include/uapi/asm/socket.h @@ -74,4 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index 6b4329f18b29..d3358b760681 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -83,4 +83,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h index 2a3b59e0e171..44aaf4639a4a 100644 --- a/arch/m32r/include/uapi/asm/socket.h +++ b/arch/m32r/include/uapi/asm/socket.h @@ -74,4 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 3b211507be7f..6a07992ba6c6 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -92,4 +92,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index b4ce844c9391..db80fd3e398b 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ 
b/arch/mn10300/include/uapi/asm/socket.h @@ -74,4 +74,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 70c512a386f7..f866fff9a004 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -73,6 +73,8 @@ #define SO_SELECT_ERR_QUEUE 0x4026 +#define SO_LL 0x4027 + /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. */ diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index a36daf3c6f9a..405fb09bda94 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -81,4 +81,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 2dacb306835c..0c5105fbaaf3 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -80,4 +80,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 89f49b68a21c..b46c3fa0b265 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -70,6 +70,8 @@ #define SO_SELECT_ERR_QUEUE 0x0029 +#define SO_LL 0x0030 + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index a8f44f50e651..b21ace4fc9ba 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -85,4 +85,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* _XTENSA_SOCKET_H */ diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index 6930cbd943e2..fcc7c365cee5 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -39,12 +39,12 @@ extern unsigned int sysctl_net_ll_poll __read_mostly; /* we can use sched_clock() because we don't care much about precision * we only care that the average is bounded */ -static inline u64 ll_end_time(void) +static inline u64 ll_end_time(struct sock *sk) { - u64 end_time = ACCESS_ONCE(sysctl_net_ll_poll); + u64 end_time = ACCESS_ONCE(sk->sk_ll_usec); /* we don't mind a ~2.5% imprecision - * sysctl_net_ll_poll is a u_int so this can't overflow + * sk->sk_ll_usec is a u_int so this can't overflow */ end_time = (end_time << 10) + sched_clock(); @@ -53,7 +53,7 @@ static inline u64 ll_end_time(void) static inline bool sk_valid_ll(struct sock *sk) { - return sysctl_net_ll_poll && sk->sk_napi_id && + return sk->sk_ll_usec && sk->sk_napi_id && !need_resched() && !signal_pending(current); } @@ -65,7 +65,7 @@ static inline bool can_poll_ll(u64 end_time) static inline bool sk_poll_ll(struct sock *sk, int nonblock) { const struct net_device_ops *ops; - u64 end_time = ll_end_time(); + u64 end_time = ll_end_time(sk); struct napi_struct *napi; int rc = false; @@ -118,7 +118,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) #else /* CONFIG_NET_LL_RX_POLL */ -static inline u64 ll_end_time(void) +static inline u64 ll_end_time(struct sock *sk) { return 0; } diff --git a/include/net/sock.h b/include/net/sock.h index ac8e1818380c..21db792bffa5 100644 --- a/include/net/sock.h +++ 
b/include/net/sock.h @@ -230,6 +230,7 @@ struct cg_proto; * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward * @sk_napi_id: id of the last napi context to receive data for sk + * @sk_ll_usec: usecs to busypoll when there is no data * @sk_allocation: allocation mode * @sk_sndbuf: size of send buffer in bytes * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, @@ -328,6 +329,7 @@ struct sock { #endif #ifdef CONFIG_NET_LL_RX_POLL unsigned int sk_napi_id; + unsigned int sk_ll_usec; #endif atomic_t sk_drops; int sk_rcvbuf; diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index c5d2e3a1cf68..ca3a20d772ac 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -76,4 +76,6 @@ #define SO_SELECT_ERR_QUEUE 45 +#define SO_LL 46 + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/net/core/sock.c b/net/core/sock.c index 788c0da5eed1..1e744b12fda3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -913,6 +913,19 @@ set_rcvbuf: sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); break; +#ifdef CONFIG_NET_LL_RX_POLL + case SO_LL: + /* allow unprivileged users to decrease the value */ + if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) + ret = -EPERM; + else { + if (val < 0) + ret = -EINVAL; + else + sk->sk_ll_usec = val; + } + break; +#endif default: ret = -ENOPROTOOPT; break; @@ -1170,6 +1183,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); break; +#ifdef CONFIG_NET_LL_RX_POLL + case SO_LL: + v.val = sk->sk_ll_usec; + break; +#endif + default: return -ENOPROTOOPT; } @@ -2288,6 +2307,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) #ifdef CONFIG_NET_LL_RX_POLL sk->sk_napi_id = 0; + sk->sk_ll_usec = sysctl_net_ll_poll; #endif /* diff --git a/net/socket.c b/net/socket.c index caaffa14e87e..3eec3f76b49c 100644 --- a/net/socket.c +++ b/net/socket.c @@ -108,7 +108,6 @@ #ifdef CONFIG_NET_LL_RX_POLL unsigned int sysctl_net_ll_poll __read_mostly; -EXPORT_SYMBOL_GPL(sysctl_net_ll_poll); #endif static int sock_no_open(struct inode *irrelevant, struct file *dontcare); -- cgit v1.2.3 From 8941bbcd572a8860ad03c76e2f3d1dafa820b842 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Mon, 17 Jun 2013 10:54:36 -0400 Subject: tipc: update code comments to reflect new uapi header path Files tipc.h and tipc_config.h were moved to uapi directory, but the corresponding comments were not updated at the same time. Signed-off-by: Ying Xue Signed-off-by: Paul Gortmaker Signed-off-by: David S. 
Miller --- include/uapi/linux/tipc.h | 2 +- include/uapi/linux/tipc_config.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h index f2d90091cc20..852373d27dbb 100644 --- a/include/uapi/linux/tipc.h +++ b/include/uapi/linux/tipc.h @@ -1,5 +1,5 @@ /* - * include/linux/tipc.h: Include file for TIPC socket interface + * include/uapi/linux/tipc.h: Header for TIPC socket interface * * Copyright (c) 2003-2006, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h index 0b1e3f218a36..6b0bff09b3a7 100644 --- a/include/uapi/linux/tipc_config.h +++ b/include/uapi/linux/tipc_config.h @@ -1,5 +1,5 @@ /* - * include/linux/tipc_config.h: Include file for TIPC configuration interface + * include/uapi/linux/tipc_config.h: Header for TIPC configuration interface * * Copyright (c) 2003-2006, Ericsson AB * Copyright (c) 2005-2007, 2010-2011, Wind River Systems -- cgit v1.2.3 From 939cfa75a0cea97aa60cb88e3722baefdceb4e72 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 17 Jun 2013 11:40:04 +0200 Subject: net: sctp: get rid of t_new macro for kzalloc t_new rather obfuscates things where everyone else is using actual function names instead of that macro, so replace it with kzalloc, which is the function t_new wraps. Signed-off-by: Daniel Borkmann Acked-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 3 --- net/sctp/associola.c | 2 +- net/sctp/bind_addr.c | 2 +- net/sctp/endpointola.c | 3 ++- net/sctp/ipv6.c | 2 +- net/sctp/protocol.c | 2 +- net/sctp/transport.c | 2 +- 7 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index cd89510eab2a..b9f136ad5a5a 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -575,9 +575,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\ /* Round an int up to the next multiple of 4. */ #define WORD_ROUND(s) (((s)+3)&~3) -/* Make a new instance of type. */ -#define t_new(type, flags) kzalloc(sizeof(type), flags) - /* Compare two timevals. */ #define tv_lt(s, t) \ (s.tv_sec < t.tv_sec || (s.tv_sec == t.tv_sec && s.tv_usec < t.tv_usec)) diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 756025c98e85..bf6e6bd553c0 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -355,7 +355,7 @@ struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, { struct sctp_association *asoc; - asoc = t_new(struct sctp_association, gfp); + asoc = kzalloc(sizeof(*asoc), gfp); if (!asoc) goto fail; diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 41145fe31813..64977ea0f9c5 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -162,7 +162,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, struct sctp_sockaddr_entry *addr; /* Add the address to the bind address list. */ - addr = t_new(struct sctp_sockaddr_entry, gfp); + addr = kzalloc(sizeof(*addr), gfp); if (!addr) return -ENOMEM; diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 5fbd7bc6bb11..a8b26741c0af 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -192,9 +192,10 @@ struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) struct sctp_endpoint *ep; /* Build a local endpoint. 
*/ - ep = t_new(struct sctp_endpoint, gfp); + ep = kzalloc(sizeof(*ep), gfp); if (!ep) goto fail; + if (!sctp_endpoint_init(ep, sk, gfp)) goto fail_init; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index fffc7b62a9a8..4f3e13b31fcc 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -402,7 +402,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, read_lock_bh(&in6_dev->lock); list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { /* Add the address to the local list. */ - addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); + addr = kzalloc(sizeof(*addr), GFP_ATOMIC); if (addr) { addr->a.v6.sin6_family = AF_INET6; addr->a.v6.sin6_port = 0; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index eaee00c61139..fad7d1b67be5 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -153,7 +153,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist, for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { /* Add the address to the local list. */ - addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); + addr = kzalloc(sizeof(*addr), GFP_ATOMIC); if (addr) { addr->a.v4.sin_family = AF_INET; addr->a.v4.sin_port = 0; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 098f1d5f769e..5d3c71bbd197 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -116,7 +116,7 @@ struct sctp_transport *sctp_transport_new(struct net *net, { struct sctp_transport *transport; - transport = t_new(struct sctp_transport, gfp); + transport = kzalloc(sizeof(*transport), gfp); if (!transport) goto fail; -- cgit v1.2.3 From dda9192851dcf904b4d1095480834f2a4f814ae3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 17 Jun 2013 11:40:05 +0200 Subject: net: sctp: remove SCTP_STATIC macro SCTP_STATIC is just another define for the static keyword. It's use is inconsistent in the SCTP code anyway and it was introduced in the initial implementation of SCTP in 2.5. We have a regression suite in lksctp-tools, but this is for user space only, so noone makes use of this macro anymore. The kernel test suite for 2.5 is incompatible with the current SCTP code anyway. So simply Remove it, to be more consistent with the rest of the kernel code. Signed-off-by: Daniel Borkmann Acked-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 8 ----- net/sctp/chunk.c | 2 +- net/sctp/input.c | 4 +-- net/sctp/ipv6.c | 4 +-- net/sctp/protocol.c | 4 +-- net/sctp/sm_make_chunk.c | 10 +++---- net/sctp/socket.c | 78 ++++++++++++++++++++++-------------------------- net/sctp/tsnmap.c | 10 +++---- net/sctp/ulpevent.c | 10 +++---- 9 files changed, 57 insertions(+), 73 deletions(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index b9f136ad5a5a..6321c081118f 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -99,14 +99,6 @@ #define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT #endif - -/* Certain internal static functions need to be exported when - * compiled into the test frame. - */ -#ifndef SCTP_STATIC -#define SCTP_STATIC static -#endif - /* * Function declarations. */ diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 69ce21e3716f..7135fc0c087a 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -66,7 +66,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg) } /* Allocate and initialize datamsg. 
*/ -SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp) +static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp) { struct sctp_datamsg *msg; msg = kmalloc(sizeof(struct sctp_datamsg), gfp); diff --git a/net/sctp/input.c b/net/sctp/input.c index 6533d81a638d..4cfc74699a3f 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -903,11 +903,11 @@ hit: } /* Look up an association. BH-safe. */ -SCTP_STATIC +static struct sctp_association *sctp_lookup_association(struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr, - struct sctp_transport **transportp) + struct sctp_transport **transportp) { struct sctp_association *asoc; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 4f3e13b31fcc..adeaa0e64f52 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -145,8 +145,8 @@ static struct notifier_block sctp_inet6addr_notifier = { }; /* ICMP error handler. */ -SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, - u8 type, u8 code, int offset, __be32 info) +static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + u8 type, u8 code, int offset, __be32 info) { struct inet6_dev *idev; struct sock *sk; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index fad7d1b67be5..57b568c38ef6 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1312,7 +1312,7 @@ static struct pernet_operations sctp_net_ops = { }; /* Initialize the universe into something sensible. */ -SCTP_STATIC __init int sctp_init(void) +static __init int sctp_init(void) { int i; int status = -EINVAL; @@ -1499,7 +1499,7 @@ err_chunk_cachep: } /* Exit handler for the SCTP protocol. */ -SCTP_STATIC __exit void sctp_exit(void) +static __exit void sctp_exit(void) { /* BUG. This should probably do something useful like clean * up all the remaining associations and all that memory. diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index cf579e71cff0..fc8548743ed5 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -68,9 +68,8 @@ #include #include -SCTP_STATIC -struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, - __u8 type, __u8 flags, int paylen); +static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, + __u8 type, __u8 flags, int paylen); static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, @@ -1353,9 +1352,8 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) /* Create a new chunk, setting the type and flags headers from the * arguments, reserving enough space for a 'paylen' byte payload. */ -SCTP_STATIC -struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, - __u8 type, __u8 flags, int paylen) +static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, + __u8 type, __u8 flags, int paylen) { struct sctp_chunk *retval; sctp_chunkhdr_t *chunk_hdr; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 510dc79a32a1..75fe92ac2e9c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -84,11 +84,6 @@ #include #include -/* WARNING: Please do not remove the SCTP_STATIC attribute to - * any of the functions below as they are used to export functions - * used by a project regression testsuite. - */ - /* Forward declarations for internal helper functions. 
*/ static int sctp_writeable(struct sock *sk); static void sctp_wfree(struct sk_buff *skb); @@ -279,7 +274,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, * sockaddr_in6 [RFC 2553]), * addr_len - the size of the address structure. */ -SCTP_STATIC int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) +static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) { int retval = 0; @@ -333,7 +328,7 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, } /* Bind a local address either to an endpoint or to an association. */ -SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) +static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { struct net *net = sock_net(sk); struct sctp_sock *sp = sctp_sk(sk); @@ -964,9 +959,9 @@ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) * * Returns 0 if ok, <0 errno code on error. */ -SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk, - struct sockaddr __user *addrs, - int addrs_size, int op) +static int sctp_setsockopt_bindx(struct sock* sk, + struct sockaddr __user *addrs, + int addrs_size, int op) { struct sockaddr *kaddrs; int err; @@ -1312,7 +1307,7 @@ out_free: * * Returns >=0 if ok, <0 errno code on error. */ -SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk, +static int __sctp_setsockopt_connectx(struct sock* sk, struct sockaddr __user *addrs, int addrs_size, sctp_assoc_t *assoc_id) @@ -1350,9 +1345,9 @@ SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk, * This is an older interface. It's kept for backward compatibility * to the option that doesn't provide association id. */ -SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk, - struct sockaddr __user *addrs, - int addrs_size) +static int sctp_setsockopt_connectx_old(struct sock* sk, + struct sockaddr __user *addrs, + int addrs_size) { return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL); } @@ -1363,9 +1358,9 @@ SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk, * indication to the call. Error is always negative and association id is * always positive. */ -SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, - struct sockaddr __user *addrs, - int addrs_size) +static int sctp_setsockopt_connectx(struct sock* sk, + struct sockaddr __user *addrs, + int addrs_size) { sctp_assoc_t assoc_id = 0; int err = 0; @@ -1386,9 +1381,9 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, * addrs_num structure member. That way we can re-use the existing * code. */ -SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len, - char __user *optval, - int __user *optlen) +static int sctp_getsockopt_connectx3(struct sock* sk, int len, + char __user *optval, + int __user *optlen) { struct sctp_getaddrs_old param; sctp_assoc_t assoc_id = 0; @@ -1464,7 +1459,7 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len, * shutdown phase does not finish during this period, close() will * return but the graceful shutdown phase continues in the system. */ -SCTP_STATIC void sctp_close(struct sock *sk, long timeout) +static void sctp_close(struct sock *sk, long timeout) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; @@ -1573,10 +1568,10 @@ static int sctp_error(struct sock *sk, int flags, int err) */ /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). 
*/ -SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); +static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); -SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t msg_len) +static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, + struct msghdr *msg, size_t msg_len) { struct net *net = sock_net(sk); struct sctp_sock *sp; @@ -2034,9 +2029,9 @@ static int sctp_skb_pull(struct sk_buff *skb, int len) */ static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); -SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) +static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, + struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len) { struct sctp_ulpevent *event = NULL; struct sctp_sock *sp = sctp_sk(sk); @@ -3565,8 +3560,8 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk, * optval - the buffer to store the value of the option. * optlen - the size of the buffer. */ -SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +static int sctp_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen) { int retval = 0; @@ -3725,8 +3720,8 @@ out_nounlock: * * len: the size of the address. */ -SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr, - int addr_len) +static int sctp_connect(struct sock *sk, struct sockaddr *addr, + int addr_len) { int err = 0; struct sctp_af *af; @@ -3752,7 +3747,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr, } /* FIXME: Write comments. */ -SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags) +static int sctp_disconnect(struct sock *sk, int flags) { return -EOPNOTSUPP; /* STUB */ } @@ -3764,7 +3759,7 @@ SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags) * descriptor will be returned from accept() to represent the newly * formed association. */ -SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err) +static struct sock *sctp_accept(struct sock *sk, int flags, int *err) { struct sctp_sock *sp; struct sctp_endpoint *ep; @@ -3817,7 +3812,7 @@ out: } /* The SCTP ioctl handler. */ -SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) +static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) { int rc = -ENOTCONN; @@ -3859,7 +3854,7 @@ out: * initialized the SCTP-specific portion of the sock. * The sock structure should already be zero-filled memory. */ -SCTP_STATIC int sctp_init_sock(struct sock *sk) +static int sctp_init_sock(struct sock *sk) { struct net *net = sock_net(sk); struct sctp_sock *sp; @@ -3993,7 +3988,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) } /* Cleanup any SCTP per socket resources. */ -SCTP_STATIC void sctp_destroy_sock(struct sock *sk) +static void sctp_destroy_sock(struct sock *sk) { struct sctp_sock *sp; @@ -4028,7 +4023,7 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk) * Disables further send and receive operations * and initiates the SCTP shutdown sequence. 
*/ -SCTP_STATIC void sctp_shutdown(struct sock *sk, int how) +static void sctp_shutdown(struct sock *sk, int how) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; @@ -5700,8 +5695,8 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, return 0; } -SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) +static int sctp_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen) { int retval = 0; int len; @@ -6046,7 +6041,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum) /* * Move a socket to LISTENING state. */ -SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog) +static int sctp_listen_start(struct sock *sk, int backlog) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; @@ -6333,8 +6328,7 @@ static int sctp_autobind(struct sock *sk) * msg_control * points here */ -SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg, - sctp_cmsgs_t *cmsgs) +static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) { struct cmsghdr *cmsg; struct msghdr *my_msg = (struct msghdr *)msg; diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index 396c45174e5b..b46019568a86 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c @@ -161,8 +161,8 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn, /* Initialize a Gap Ack Block iterator from memory being provided. */ -SCTP_STATIC void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map, - struct sctp_tsnmap_iter *iter) +static void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map, + struct sctp_tsnmap_iter *iter) { /* Only start looking one past the Cumulative TSN Ack Point. */ iter->start = map->cumulative_tsn_ack_point + 1; @@ -171,9 +171,9 @@ SCTP_STATIC void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map, /* Get the next Gap Ack Blocks. Returns 0 if there was not another block * to get. */ -SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, - struct sctp_tsnmap_iter *iter, - __u16 *start, __u16 *end) +static int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, + struct sctp_tsnmap_iter *iter, + __u16 *start, __u16 *end) { int ended = 0; __u16 start_ = 0, end_ = 0, offset; diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 10c018a5b9fe..44a45dbee4df 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -57,9 +57,9 @@ static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event); /* Initialize an ULP event from an given skb. */ -SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, - int msg_flags, - unsigned int len) +static void sctp_ulpevent_init(struct sctp_ulpevent *event, + int msg_flags, + unsigned int len) { memset(event, 0, sizeof(struct sctp_ulpevent)); event->msg_flags = msg_flags; @@ -67,8 +67,8 @@ SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, } /* Create a new sctp_ulpevent. */ -SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, - gfp_t gfp) +static struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, + gfp_t gfp) { struct sctp_ulpevent *event; struct sk_buff *skb; -- cgit v1.2.3 From 2f301ab29e4656af824592363039d8f6bd5a9f68 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Thu, 16 May 2013 13:00:28 +0200 Subject: nl80211/cfg80211: add 5 and 10 MHz defines and wiphy flag Add defines for 5 and 10 MHz channel width and fix channel handling functions accordingly. 
Also check for and report the WIPHY_FLAG_SUPPORTS_5_10_MHZ capability. Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer [fix spelling in comment] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ include/uapi/linux/nl80211.h | 4 ++++ net/wireless/chan.c | 57 +++++++++++++++++++++++++++++++++++++------- net/wireless/nl80211.c | 21 ++++++++++++---- 4 files changed, 72 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 6a43c34ce96f..316f34bb8e6e 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2342,6 +2342,7 @@ struct cfg80211_ops { * responds to probe-requests in hardware. * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX. * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call. + * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. */ enum wiphy_flags { WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), @@ -2365,6 +2366,7 @@ enum wiphy_flags { WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19), WIPHY_FLAG_OFFCHAN_TX = BIT(20), WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21), + WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index ca6facf4df0c..861e5eba3953 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2758,6 +2758,8 @@ enum nl80211_channel_type { * and %NL80211_ATTR_CENTER_FREQ2 attributes must be provided as well * @NL80211_CHAN_WIDTH_160: 160 MHz channel, the %NL80211_ATTR_CENTER_FREQ1 * attribute must be provided as well + * @NL80211_CHAN_WIDTH_5: 5 MHz OFDM channel + * @NL80211_CHAN_WIDTH_10: 10 MHz OFDM channel */ enum nl80211_chan_width { NL80211_CHAN_WIDTH_20_NOHT, @@ -2766,6 +2768,8 @@ enum nl80211_chan_width { NL80211_CHAN_WIDTH_80, NL80211_CHAN_WIDTH_80P80, NL80211_CHAN_WIDTH_160, + NL80211_CHAN_WIDTH_5, + NL80211_CHAN_WIDTH_10, }; /** diff --git a/net/wireless/chan.c b/net/wireless/chan.c index fd556ac05fdb..50f6195c8b70 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -54,6 +54,8 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) control_freq = chandef->chan->center_freq; switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: if (chandef->center_freq1 != control_freq) @@ -152,6 +154,12 @@ static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) int width; switch (c->width) { + case NL80211_CHAN_WIDTH_5: + width = 5; + break; + case NL80211_CHAN_WIDTH_10: + width = 10; + break; case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: width = 20; @@ -194,6 +202,16 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1, if (c1->width == c2->width) return NULL; + /* + * can't be compatible if one of them is 5 or 10 MHz, + * but they don't have the same width. 
+ */ + if (c1->width == NL80211_CHAN_WIDTH_5 || + c1->width == NL80211_CHAN_WIDTH_10 || + c2->width == NL80211_CHAN_WIDTH_5 || + c2->width == NL80211_CHAN_WIDTH_10) + return NULL; + if (c1->width == NL80211_CHAN_WIDTH_20_NOHT || c1->width == NL80211_CHAN_WIDTH_20) return c2; @@ -264,11 +282,17 @@ static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy, u32 bandwidth) { struct ieee80211_channel *c; - u32 freq; + u32 freq, start_freq, end_freq; + + if (bandwidth <= 20) { + start_freq = center_freq; + end_freq = center_freq; + } else { + start_freq = center_freq - bandwidth/2 + 10; + end_freq = center_freq + bandwidth/2 - 10; + } - for (freq = center_freq - bandwidth/2 + 10; - freq <= center_freq + bandwidth/2 - 10; - freq += 20) { + for (freq = start_freq; freq <= end_freq; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c) return -EINVAL; @@ -310,11 +334,17 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, u32 prohibited_flags) { struct ieee80211_channel *c; - u32 freq; + u32 freq, start_freq, end_freq; + + if (bandwidth <= 20) { + start_freq = center_freq; + end_freq = center_freq; + } else { + start_freq = center_freq - bandwidth/2 + 10; + end_freq = center_freq + bandwidth/2 - 10; + } - for (freq = center_freq - bandwidth/2 + 10; - freq <= center_freq + bandwidth/2 - 10; - freq += 20) { + for (freq = start_freq; freq <= end_freq; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c) return false; @@ -349,6 +379,12 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, control_freq = chandef->chan->center_freq; switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + width = 5; + break; + case NL80211_CHAN_WIDTH_10: + width = 10; + break; case NL80211_CHAN_WIDTH_20: if (!ht_cap->ht_supported) return false; @@ -405,6 +441,11 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, if (width > 20) prohibited_flags |= IEEE80211_CHAN_NO_OFDM; + /* 5 and 10 MHz are only defined for the OFDM PHY */ + if (width < 20) + prohibited_flags |= IEEE80211_CHAN_NO_OFDM; + + if (!cfg80211_secondary_chans_ok(wiphy, chandef->center_freq1, width, prohibited_flags)) return false; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 1c4f7daea6c7..4ab1ffa9df11 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1188,6 +1188,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev, if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) && nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP)) goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) && + nla_put_flag(msg, WIPHY_FLAG_SUPPORTS_5_10_MHZ)) + goto nla_put_failure; (*split_start)++; if (split) @@ -1731,6 +1734,11 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, IEEE80211_CHAN_DISABLED)) return -EINVAL; + if ((chandef->width == NL80211_CHAN_WIDTH_5 || + chandef->width == NL80211_CHAN_WIDTH_10) && + !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ)) + return -EINVAL; + return 0; } @@ -6280,11 +6288,16 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef)) return -EINVAL; - if (ibss.chandef.width > NL80211_CHAN_WIDTH_40) - return -EINVAL; - if (ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && - !(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)) + switch (ibss.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + break; + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + if (rdev->wiphy.features & NL80211_FEATURE_HT_IBSS) + break; + 
default: return -EINVAL; + } ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; -- cgit v1.2.3 From 30e747326378caec9ad13515bc9bde2e41b1fee3 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Thu, 16 May 2013 13:00:29 +0200 Subject: nl80211: add rate flags for 5/10 Mhz channels Signed-off-by: Simon Wunderlich Signed-off-by: Mathias Kretschmer Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 316f34bb8e6e..e3a39fc9a296 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -188,6 +188,8 @@ struct ieee80211_channel { * when used with 802.11g (on the 2.4 GHz band); filled by the * core code when registering the wiphy. * @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode. + * @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode + * @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode */ enum ieee80211_rate_flags { IEEE80211_RATE_SHORT_PREAMBLE = 1<<0, @@ -195,6 +197,8 @@ enum ieee80211_rate_flags { IEEE80211_RATE_MANDATORY_B = 1<<2, IEEE80211_RATE_MANDATORY_G = 1<<3, IEEE80211_RATE_ERP_G = 1<<4, + IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5, + IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6, }; /** @@ -432,6 +436,30 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, u32 prohibited_flags); +/** + * ieee80211_chandef_rate_flags - returns rate flags for a channel + * + * In some channel types, not all rates may be used - for example CCK + * rates may not be used in 5/10 MHz channels. + * + * @chandef: channel definition for the channel + * + * Returns: rate flags which apply for this channel + */ +static inline enum ieee80211_rate_flags +ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + return IEEE80211_RATE_SUPPORTS_5MHZ; + case NL80211_CHAN_WIDTH_10: + return IEEE80211_RATE_SUPPORTS_10MHZ; + default: + break; + } + return 0; +} + /** * enum survey_info_flags - survey information flags * -- cgit v1.2.3 From 959867fa55d0cb55fb3d08656e5e62607167617f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 19 Jun 2013 13:05:42 +0200 Subject: cfg80211: require passing BSS struct back to cfg80211_assoc_timeout Doing so will allow us to hold the BSS (not just ref it) over the association process, thus ensuring that it doesn't time out and gets invisible to the user (e.g. in 'iw wlan0 link'.) This also fixes a leak in mac80211 where it doesn't always release the BSS struct properly in all cases where calling this function. This leak was reported by Ben Greear. Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 16 ++++++++-------- net/mac80211/mlme.c | 15 +++++++-------- net/wireless/mlme.c | 8 +++++--- 3 files changed, 20 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index e3a39fc9a296..7b0730aeb892 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1459,7 +1459,8 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie); * This structure provides information needed to complete IEEE 802.11 * authentication. * - * @bss: The BSS to authenticate with. + * @bss: The BSS to authenticate with, the callee must obtain a reference + * to it if it needs to keep it. 
* @auth_type: Authentication type (algorithm) * @ie: Extra IEs to add to Authentication frame or %NULL * @ie_len: Length of ie buffer in octets @@ -1497,11 +1498,10 @@ enum cfg80211_assoc_req_flags { * * This structure provides information needed to complete IEEE 802.11 * (re)association. - * @bss: The BSS to associate with. If the call is successful the driver - * is given a reference that it must release, normally via a call to - * cfg80211_send_rx_assoc(), or, if association timed out, with a - * call to cfg80211_put_bss() (in addition to calling - * cfg80211_send_assoc_timeout()) + * @bss: The BSS to associate with. If the call is successful the driver is + * given a reference that it must give back to cfg80211_send_rx_assoc() + * or to cfg80211_assoc_timeout(). To ensure proper refcounting, new + * association requests while already associating must be rejected. * @ie: Extra IEs to add to (Re)Association Request frame or %NULL * @ie_len: Length of ie buffer in octets * @use_mfp: Use management frame protection (IEEE 802.11w) in this association @@ -3522,11 +3522,11 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, /** * cfg80211_assoc_timeout - notification of timed out association * @dev: network device - * @addr: The MAC address of the device with which the association timed out + * @bss: The BSS entry with which association timed out. * * This function may sleep. The caller must hold the corresponding wdev's mutex. */ -void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr); +void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss); /** * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 34d54fe81483..ae31968d42d3 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2795,8 +2795,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) { /* oops -- internal error -- send timeout for now */ ieee80211_destroy_assoc_data(sdata, false); - cfg80211_put_bss(sdata->local->hw.wiphy, bss); - cfg80211_assoc_timeout(sdata->dev, mgmt->bssid); + cfg80211_assoc_timeout(sdata->dev, bss); return; } sdata_info(sdata, "associated\n"); @@ -3513,13 +3512,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) time_after(jiffies, ifmgd->assoc_data->timeout)) { if ((ifmgd->assoc_data->need_beacon && !ifmgd->have_beacon) || ieee80211_do_assoc(sdata)) { - u8 bssid[ETH_ALEN]; - - memcpy(bssid, ifmgd->assoc_data->bss->bssid, ETH_ALEN); + struct cfg80211_bss *bss = ifmgd->assoc_data->bss; ieee80211_destroy_assoc_data(sdata, false); - - cfg80211_assoc_timeout(sdata->dev, bssid); + cfg80211_assoc_timeout(sdata->dev, bss); } } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) run_again(sdata, ifmgd->assoc_data->timeout); @@ -4445,8 +4441,11 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) cancel_work_sync(&ifmgd->chswitch_work); sdata_lock(sdata); - if (ifmgd->assoc_data) + if (ifmgd->assoc_data) { + struct cfg80211_bss *bss = ifmgd->assoc_data->bss; ieee80211_destroy_assoc_data(sdata, false); + cfg80211_assoc_timeout(sdata->dev, bss); + } if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); del_timer_sync(&ifmgd->timer); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index a61a44bc6cf0..dd6f79d7bd2e 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -131,16 +131,18 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 
*addr) } EXPORT_SYMBOL(cfg80211_auth_timeout); -void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr) +void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); - trace_cfg80211_send_assoc_timeout(dev, addr); + trace_cfg80211_send_assoc_timeout(dev, bss->bssid); - nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); + nl80211_send_assoc_timeout(rdev, dev, bss->bssid, GFP_KERNEL); cfg80211_sme_assoc_timeout(wdev); + + cfg80211_put_bss(wiphy, bss); } EXPORT_SYMBOL(cfg80211_assoc_timeout); -- cgit v1.2.3 From bda7bb46343647f68591366731295a0f3eea59ed Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 17 Jun 2013 17:49:38 -0700 Subject: gre: Allow multiple protocol listener for gre protocol. Currently there is only one user is allowed to register for gre protocol. Following patch adds de-multiplexer. So that multiple modules can listen on gre protocol e.g. kernel gre devices and ovs. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller --- include/net/gre.h | 24 ++++++ net/ipv4/gre.c | 221 +++++++++++++++++++++++++++++++++++++++++++++++++++++- net/ipv4/ip_gre.c | 173 +++++++----------------------------------- 3 files changed, 267 insertions(+), 151 deletions(-) (limited to 'include') diff --git a/include/net/gre.h b/include/net/gre.h index 9f03a390c826..c6ea0c72c605 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -7,6 +7,7 @@ #define GREPROTO_CISCO 0 #define GREPROTO_PPTP 1 #define GREPROTO_MAX 2 +#define GRE_IP_PROTO_MAX 2 struct gre_protocol { int (*handler)(struct sk_buff *skb); @@ -22,6 +23,29 @@ struct gre_base_hdr { int gre_add_protocol(const struct gre_protocol *proto, u8 version); int gre_del_protocol(const struct gre_protocol *proto, u8 version); +struct gre_cisco_protocol { + int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi); + int (*err_handler)(struct sk_buff *skb, u32 info, + const struct tnl_ptk_info *tpi); + u8 priority; +}; + +int gre_cisco_register(struct gre_cisco_protocol *proto); +int gre_cisco_unregister(struct gre_cisco_protocol *proto); + +static inline int ip_gre_calc_hlen(__be16 o_flags) +{ + int addend = 4; + + if (o_flags&TUNNEL_CSUM) + addend += 4; + if (o_flags&TUNNEL_KEY) + addend += 4; + if (o_flags&TUNNEL_SEQ) + addend += 4; + return addend; +} + static inline __be16 gre_flags_to_tnl_flags(__be16 flags) { __be16 tflags = 0; diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c index 1e294d510ac3..8b9a373890ab 100644 --- a/net/ipv4/gre.c +++ b/net/ipv4/gre.c @@ -13,6 +13,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include +#include #include #include #include @@ -24,8 +26,12 @@ #include #include +#include +#include +#include static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly; +static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX]; int gre_add_protocol(const struct gre_protocol *proto, u8 version) { @@ -55,6 +61,173 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version) } EXPORT_SYMBOL_GPL(gre_del_protocol); +static __sum16 check_checksum(struct sk_buff *skb) +{ + __sum16 csum = 0; + + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + csum = csum_fold(skb->csum); + + if (!csum) + break; + /* Fall through. 
*/ + + case CHECKSUM_NONE: + skb->csum = 0; + csum = __skb_checksum_complete(skb); + skb->ip_summed = CHECKSUM_COMPLETE; + break; + } + + return csum; +} + +static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, + bool *csum_err) +{ + unsigned int ip_hlen = ip_hdrlen(skb); + const struct gre_base_hdr *greh; + __be32 *options; + int hdr_len; + + if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) + return -EINVAL; + + greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); + if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) + return -EINVAL; + + tpi->flags = gre_flags_to_tnl_flags(greh->flags); + hdr_len = ip_gre_calc_hlen(tpi->flags); + + if (!pskb_may_pull(skb, hdr_len)) + return -EINVAL; + + greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); + tpi->proto = greh->protocol; + + options = (__be32 *)(greh + 1); + if (greh->flags & GRE_CSUM) { + if (check_checksum(skb)) { + *csum_err = true; + return -EINVAL; + } + options++; + } + + if (greh->flags & GRE_KEY) { + tpi->key = *options; + options++; + } else + tpi->key = 0; + + if (unlikely(greh->flags & GRE_SEQ)) { + tpi->seq = *options; + options++; + } else + tpi->seq = 0; + + /* WCCP version 1 and 2 protocol decoding. + * - Change protocol to IP + * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header + */ + if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { + tpi->proto = htons(ETH_P_IP); + if ((*(u8 *)options & 0xF0) != 0x40) { + hdr_len += 4; + if (!pskb_may_pull(skb, hdr_len)) + return -EINVAL; + } + } + return 0; +} + +static int gre_cisco_rcv(struct sk_buff *skb) +{ + struct tnl_ptk_info tpi; + int i; + bool csum_err = false; + + if (parse_gre_header(skb, &tpi, &csum_err) < 0) + goto drop; + + rcu_read_lock(); + for (i = 0; i < GRE_IP_PROTO_MAX; i++) { + struct gre_cisco_protocol *proto; + int ret; + + proto = rcu_dereference(gre_cisco_proto_list[i]); + if (!proto) + continue; + ret = proto->handler(skb, &tpi); + if (ret == PACKET_RCVD) { + rcu_read_unlock(); + return 0; + } + } + rcu_read_unlock(); + + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); +drop: + kfree_skb(skb); + return 0; +} + +static void gre_cisco_err(struct sk_buff *skb, u32 info) +{ + /* All the routers (except for Linux) return only + * 8 bytes of packet payload. It means, that precise relaying of + * ICMP in the real Internet is absolutely infeasible. + * + * Moreover, Cisco "wise men" put GRE key to the third word + * in GRE header. It makes impossible maintaining even soft + * state for keyed + * GRE tunnels with enabled checksum. Tell them "thank you". + * + * Well, I wonder, rfc1812 was written by Cisco employee, + * what the hell these idiots break standards established + * by themselves??? + */ + + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; + struct tnl_ptk_info tpi; + bool csum_err = false; + int i; + + if (parse_gre_header(skb, &tpi, &csum_err)) { + if (!csum_err) /* ignore csum errors. 
*/ + return; + } + + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { + ipv4_update_pmtu(skb, dev_net(skb->dev), info, + skb->dev->ifindex, 0, IPPROTO_GRE, 0); + return; + } + if (type == ICMP_REDIRECT) { + ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0, + IPPROTO_GRE, 0); + return; + } + + rcu_read_lock(); + for (i = 0; i < GRE_IP_PROTO_MAX; i++) { + struct gre_cisco_protocol *proto; + + proto = rcu_dereference(gre_cisco_proto_list[i]); + if (!proto) + continue; + + if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD) + goto out; + + } +out: + rcu_read_unlock(); +} + static int gre_rcv(struct sk_buff *skb) { const struct gre_protocol *proto; @@ -206,27 +379,68 @@ static const struct net_offload gre_offload = { }, }; +static const struct gre_protocol ipgre_protocol = { + .handler = gre_cisco_rcv, + .err_handler = gre_cisco_err, +}; + +int gre_cisco_register(struct gre_cisco_protocol *newp) +{ + struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **) + &gre_cisco_proto_list[newp->priority]; + + return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY; +} +EXPORT_SYMBOL_GPL(gre_cisco_register); + +int gre_cisco_unregister(struct gre_cisco_protocol *del_proto) +{ + struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **) + &gre_cisco_proto_list[del_proto->priority]; + int ret; + + ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL; + + if (ret) + return ret; + + synchronize_net(); + return 0; +} +EXPORT_SYMBOL_GPL(gre_cisco_unregister); + static int __init gre_init(void) { pr_info("GRE over IPv4 demultiplexor driver\n"); if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) { pr_err("can't add protocol\n"); - return -EAGAIN; + goto err; + } + + if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) { + pr_info("%s: can't add ipgre handler\n", __func__); + goto err_gre; } if (inet_add_offload(&gre_offload, IPPROTO_GRE)) { pr_err("can't add protocol offload\n"); - inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); - return -EAGAIN; + goto err_gso; } return 0; +err_gso: + gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); +err_gre: + inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); +err: + return -EAGAIN; } static void __exit gre_exit(void) { inet_del_offload(&gre_offload, IPPROTO_GRE); + gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); } @@ -236,4 +450,3 @@ module_exit(gre_exit); MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver"); MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)"); MODULE_LICENSE("GPL"); - diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index a982657d05e7..19863a81cea1 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -121,103 +121,8 @@ static int ipgre_tunnel_init(struct net_device *dev); static int ipgre_net_id __read_mostly; static int gre_tap_net_id __read_mostly; -static __sum16 check_checksum(struct sk_buff *skb) -{ - __sum16 csum = 0; - - switch (skb->ip_summed) { - case CHECKSUM_COMPLETE: - csum = csum_fold(skb->csum); - - if (!csum) - break; - /* Fall through. 
*/ - - case CHECKSUM_NONE: - skb->csum = 0; - csum = __skb_checksum_complete(skb); - skb->ip_summed = CHECKSUM_COMPLETE; - break; - } - - return csum; -} - -static int ip_gre_calc_hlen(__be16 o_flags) -{ - int addend = 4; - - if (o_flags&TUNNEL_CSUM) - addend += 4; - if (o_flags&TUNNEL_KEY) - addend += 4; - if (o_flags&TUNNEL_SEQ) - addend += 4; - return addend; -} - -static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err, int *hdr_len) -{ - unsigned int ip_hlen = ip_hdrlen(skb); - const struct gre_base_hdr *greh; - __be32 *options; - - if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) - return -EINVAL; - - greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); - if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) - return -EINVAL; - - tpi->flags = gre_flags_to_tnl_flags(greh->flags); - *hdr_len = ip_gre_calc_hlen(tpi->flags); - - if (!pskb_may_pull(skb, *hdr_len)) - return -EINVAL; - - greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); - - tpi->proto = greh->protocol; - - options = (__be32 *)(greh + 1); - if (greh->flags & GRE_CSUM) { - if (check_checksum(skb)) { - *csum_err = true; - return -EINVAL; - } - options++; - } - - if (greh->flags & GRE_KEY) { - tpi->key = *options; - options++; - } else - tpi->key = 0; - - if (unlikely(greh->flags & GRE_SEQ)) { - tpi->seq = *options; - options++; - } else - tpi->seq = 0; - - /* WCCP version 1 and 2 protocol decoding. - * - Change protocol to IP - * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header - */ - if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { - tpi->proto = htons(ETH_P_IP); - if ((*(u8 *)options & 0xF0) != 0x40) { - *hdr_len += 4; - if (!pskb_may_pull(skb, *hdr_len)) - return -EINVAL; - } - } - - return 0; -} - -static void ipgre_err(struct sk_buff *skb, u32 info) +static int ipgre_err(struct sk_buff *skb, u32 info, + const struct tnl_ptk_info *tpi) { /* All the routers (except for Linux) return only @@ -239,26 +144,18 @@ static void ipgre_err(struct sk_buff *skb, u32 info) const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; - struct tnl_ptk_info tpi; - int hdr_len; - bool csum_err = false; - - if (parse_gre_header(skb, &tpi, &csum_err, &hdr_len)) { - if (!csum_err) /* ignore csum errors. */ - return; - } switch (type) { default: case ICMP_PARAMETERPROB: - return; + return PACKET_RCVD; case ICMP_DEST_UNREACH: switch (code) { case ICMP_SR_FAILED: case ICMP_PORT_UNREACH: /* Impossible event. */ - return; + return PACKET_RCVD; default: /* All others are translated to HOST_UNREACH. 
rfc2003 contains "deep thoughts" about NET_UNREACH, @@ -269,79 +166,61 @@ static void ipgre_err(struct sk_buff *skb, u32 info) break; case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) - return; + return PACKET_RCVD; break; case ICMP_REDIRECT: break; } - if (tpi.proto == htons(ETH_P_TEB)) + if (tpi->proto == htons(ETH_P_TEB)) itn = net_generic(net, gre_tap_net_id); else itn = net_generic(net, ipgre_net_id); iph = (const struct iphdr *)skb->data; - t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags, - iph->daddr, iph->saddr, tpi.key); + t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, + iph->daddr, iph->saddr, tpi->key); if (t == NULL) - return; + return PACKET_REJECT; - if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { - ipv4_update_pmtu(skb, dev_net(skb->dev), info, - t->parms.link, 0, IPPROTO_GRE, 0); - return; - } - if (type == ICMP_REDIRECT) { - ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, - IPPROTO_GRE, 0); - return; - } if (t->parms.iph.daddr == 0 || ipv4_is_multicast(t->parms.iph.daddr)) - return; + return PACKET_RCVD; if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) - return; + return PACKET_RCVD; if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO)) t->err_count++; else t->err_count = 1; t->err_time = jiffies; + return PACKET_RCVD; } -static int ipgre_rcv(struct sk_buff *skb) +static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) { struct net *net = dev_net(skb->dev); struct ip_tunnel_net *itn; const struct iphdr *iph; struct ip_tunnel *tunnel; - struct tnl_ptk_info tpi; - int hdr_len; - bool csum_err = false; - - if (parse_gre_header(skb, &tpi, &csum_err, &hdr_len) < 0) - goto drop; - if (tpi.proto == htons(ETH_P_TEB)) + if (tpi->proto == htons(ETH_P_TEB)) itn = net_generic(net, gre_tap_net_id); else itn = net_generic(net, ipgre_net_id); iph = ip_hdr(skb); - tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags, - iph->saddr, iph->daddr, tpi.key); + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, + iph->saddr, iph->daddr, tpi->key); if (tunnel) { - ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); - return 0; + ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); + return PACKET_RCVD; } - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); -drop: - kfree_skb(skb); - return 0; + return PACKET_REJECT; } static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb) @@ -708,9 +587,10 @@ static int ipgre_tunnel_init(struct net_device *dev) return ip_tunnel_init(dev); } -static const struct gre_protocol ipgre_protocol = { - .handler = ipgre_rcv, - .err_handler = ipgre_err, +static struct gre_cisco_protocol ipgre_protocol = { + .handler = ipgre_rcv, + .err_handler = ipgre_err, + .priority = 0, }; static int __net_init ipgre_init_net(struct net *net) @@ -978,7 +858,7 @@ static int __init ipgre_init(void) if (err < 0) goto pnet_tap_faied; - err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO); + err = gre_cisco_register(&ipgre_protocol); if (err < 0) { pr_info("%s: can't add protocol\n", __func__); goto add_proto_failed; @@ -997,7 +877,7 @@ static int __init ipgre_init(void) tap_ops_failed: rtnl_link_unregister(&ipgre_link_ops); rtnl_link_failed: - gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); + gre_cisco_unregister(&ipgre_protocol); add_proto_failed: unregister_pernet_device(&ipgre_tap_net_ops); pnet_tap_faied: @@ -1009,8 +889,7 @@ static void __exit ipgre_fini(void) { rtnl_link_unregister(&ipgre_tap_ops); rtnl_link_unregister(&ipgre_link_ops); - if 
(gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) - pr_info("%s: can't remove protocol\n", __func__); + gre_cisco_unregister(&ipgre_protocol); unregister_pernet_device(&ipgre_tap_net_ops); unregister_pernet_device(&ipgre_net_ops); } -- cgit v1.2.3 From 752f36da68e9136df8918461d651723a43627e04 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 17 Jun 2013 17:49:45 -0700 Subject: gre: export gre_build_header() function. This is required for ovs gre module. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller --- include/net/gre.h | 2 ++ net/ipv4/gre.c | 32 ++++++++++++++++++++++++++++++++ net/ipv4/ip_gre.c | 40 +--------------------------------------- 3 files changed, 35 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/include/net/gre.h b/include/net/gre.h index c6ea0c72c605..cbb9d51d82c1 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -32,6 +32,8 @@ struct gre_cisco_protocol { int gre_cisco_register(struct gre_cisco_protocol *proto); int gre_cisco_unregister(struct gre_cisco_protocol *proto); +void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, + int hdr_len); static inline int ip_gre_calc_hlen(__be16 o_flags) { diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c index 8b9a373890ab..1cbc46536d1f 100644 --- a/net/ipv4/gre.c +++ b/net/ipv4/gre.c @@ -61,6 +61,38 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version) } EXPORT_SYMBOL_GPL(gre_del_protocol); +void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, + int hdr_len) +{ + struct gre_base_hdr *greh; + + skb_push(skb, hdr_len); + + greh = (struct gre_base_hdr *)skb->data; + greh->flags = tnl_flags_to_gre_flags(tpi->flags); + greh->protocol = tpi->proto; + + if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) { + __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); + + if (tpi->flags&TUNNEL_SEQ) { + *ptr = tpi->seq; + ptr--; + } + if (tpi->flags&TUNNEL_KEY) { + *ptr = tpi->key; + ptr--; + } + if (tpi->flags&TUNNEL_CSUM && + !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) { + *ptr = 0; + *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, + skb->len, 0)); + } + } +} +EXPORT_SYMBOL_GPL(gre_build_header); + static __sum16 check_checksum(struct sk_buff *skb) { __sum16 csum = 0; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 19863a81cea1..362c7c4c13ca 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -248,40 +248,6 @@ error: return ERR_PTR(err); } -static struct sk_buff *gre_build_header(struct sk_buff *skb, - const struct tnl_ptk_info *tpi, - int hdr_len) -{ - struct gre_base_hdr *greh; - - skb_push(skb, hdr_len); - - greh = (struct gre_base_hdr *)skb->data; - greh->flags = tnl_flags_to_gre_flags(tpi->flags); - greh->protocol = tpi->proto; - - if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) { - __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); - - if (tpi->flags&TUNNEL_SEQ) { - *ptr = tpi->seq; - ptr--; - } - if (tpi->flags&TUNNEL_KEY) { - *ptr = tpi->key; - ptr--; - } - if (tpi->flags&TUNNEL_CSUM && - !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) { - *(__sum16 *)ptr = 0; - *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, - skb->len, 0)); - } - } - - return skb; -} - static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, __be16 proto) @@ -302,11 +268,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, tpi.seq = htonl(tunnel->o_seqno); /* Push GRE header. 
*/ - skb = gre_build_header(skb, &tpi, tunnel->hlen); - if (unlikely(!skb)) { - dev->stats.tx_dropped++; - return; - } + gre_build_header(skb, &tpi, tunnel->hlen); ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); } -- cgit v1.2.3 From 45f2e9976cb6fc3f1cc533fd53fe74da5a9dbce4 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 17 Jun 2013 17:49:51 -0700 Subject: gre: export gre_handle_offloads() function. This is required for OVS GRE offloading. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller --- include/net/gre.h | 1 + net/ipv4/gre.c | 29 +++++++++++++++++++++++++++++ net/ipv4/ip_gre.c | 34 ++-------------------------------- 3 files changed, 32 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/net/gre.h b/include/net/gre.h index cbb9d51d82c1..a5a4ddf05300 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -34,6 +34,7 @@ int gre_cisco_register(struct gre_cisco_protocol *proto); int gre_cisco_unregister(struct gre_cisco_protocol *proto); void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, int hdr_len); +struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum); static inline int ip_gre_calc_hlen(__be16 o_flags) { diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c index 1cbc46536d1f..5ecc9c49b4dc 100644 --- a/net/ipv4/gre.c +++ b/net/ipv4/gre.c @@ -93,6 +93,35 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, } EXPORT_SYMBOL_GPL(gre_build_header); +struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum) +{ + int err; + + if (likely(!skb->encapsulation)) { + skb_reset_inner_headers(skb); + skb->encapsulation = 1; + } + + if (skb_is_gso(skb)) { + err = skb_unclone(skb, GFP_ATOMIC); + if (unlikely(err)) + goto error; + skb_shinfo(skb)->gso_type |= SKB_GSO_GRE; + return skb; + } else if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) { + err = skb_checksum_help(skb); + if (unlikely(err)) + goto error; + } else if (skb->ip_summed != CHECKSUM_PARTIAL) + skb->ip_summed = CHECKSUM_NONE; + + return skb; +error: + kfree_skb(skb); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(gre_handle_offloads); + static __sum16 check_checksum(struct sk_buff *skb) { __sum16 csum = 0; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 362c7c4c13ca..c326e869993a 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -223,31 +223,6 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) return PACKET_REJECT; } -static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb) -{ - int err; - - if (skb_is_gso(skb)) { - err = skb_unclone(skb, GFP_ATOMIC); - if (unlikely(err)) - goto error; - skb_shinfo(skb)->gso_type |= SKB_GSO_GRE; - return skb; - } else if (skb->ip_summed == CHECKSUM_PARTIAL && - tunnel->parms.o_flags&TUNNEL_CSUM) { - err = skb_checksum_help(skb); - if (unlikely(err)) - goto error; - } else if (skb->ip_summed != CHECKSUM_PARTIAL) - skb->ip_summed = CHECKSUM_NONE; - - return skb; - -error: - kfree_skb(skb); - return ERR_PTR(err); -} - static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, __be16 proto) @@ -255,11 +230,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, struct ip_tunnel *tunnel = netdev_priv(dev); struct tnl_ptk_info tpi; - if (likely(!skb->encapsulation)) { - skb_reset_inner_headers(skb); - skb->encapsulation = 1; - } - tpi.flags = tunnel->parms.o_flags; tpi.proto = proto; tpi.key = tunnel->parms.o_key; @@ -279,7 +249,7 @@ static 
netdev_tx_t ipgre_xmit(struct sk_buff *skb, struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *tnl_params; - skb = handle_offloads(tunnel, skb); + skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); if (IS_ERR(skb)) goto out; @@ -318,7 +288,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, { struct ip_tunnel *tunnel = netdev_priv(dev); - skb = handle_offloads(tunnel, skb); + skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); if (IS_ERR(skb)) goto out; -- cgit v1.2.3 From 0e6fbc5b6c6218987c93b8c7ca60cf786062899d Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 17 Jun 2013 17:49:56 -0700 Subject: ip_tunnels: extend iptunnel_xmit() Refactor various ip tunnels xmit functions and extend iptunnel_xmit() so that there is more code sharing. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 32 +++++------------ include/net/ip_tunnels.h | 26 ++++++++------ net/ipv4/Makefile | 2 +- net/ipv4/ip_tunnel.c | 38 +++++--------------- net/ipv4/ip_tunnel_core.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++ net/ipv6/sit.c | 39 ++++++--------------- 6 files changed, 131 insertions(+), 94 deletions(-) create mode 100644 net/ipv4/ip_tunnel_core.c (limited to 'include') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f6dce13c8f89..284c6c00c353 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1021,7 +1021,6 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *vxlan = netdev_priv(dev); struct rtable *rt; const struct iphdr *old_iph; - struct iphdr *iph; struct vxlanhdr *vxh; struct udphdr *uh; struct flowi4 fl4; @@ -1030,6 +1029,7 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, u32 vni; __be16 df = 0; __u8 tos, ttl; + int err; dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port; vni = rdst->remote_vni; @@ -1097,13 +1097,6 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vxlan_encap_bypass(skb, vxlan, dst_vxlan); return NETDEV_TX_OK; } - - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | - IPSKB_REROUTED); - skb_dst_drop(skb); - skb_dst_set(skb, &rt->dst); - vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_FLAGS); vxh->vx_vni = htonl(vni << 8); @@ -1118,27 +1111,18 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, uh->len = htons(skb->len); uh->check = 0; - __skb_push(skb, sizeof(*iph)); - skb_reset_network_header(skb); - iph = ip_hdr(skb); - iph->version = 4; - iph->ihl = sizeof(struct iphdr) >> 2; - iph->frag_off = df; - iph->protocol = IPPROTO_UDP; - iph->tos = ip_tunnel_ecn_encap(tos, old_iph, skb); - iph->daddr = dst; - iph->saddr = fl4.saddr; - iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); - tunnel_ip_select_ident(skb, old_iph, &rt->dst); - - nf_reset(skb); - vxlan_set_owner(dev, skb); if (handle_offloads(skb)) goto drop; - iptunnel_xmit(skb, dev); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); + + err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst, + IPPROTO_UDP, tos, ttl, df); + iptunnel_xmit_stats(err, &dev->stats, dev->tstats); + return NETDEV_TX_OK; drop: diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 1be442f89406..b84f1ab09d78 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -155,23 +155,27 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb, (skb_shinfo(skb)->gso_segs ?: 1) - 1); } -static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev) +int iptunnel_xmit(struct net *net, struct rtable *rt, + struct sk_buff *skb, + __be32 src, __be32 dst, __u8 proto, + __u8 tos, __u8 ttl, __be16 df); + +static inline void iptunnel_xmit_stats(int err, + struct net_device_stats *err_stats, + struct pcpu_tstats __percpu *stats) { - int err; - int pkt_len = skb->len - skb_transport_offset(skb); - struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats); + if (err > 0) { + struct pcpu_tstats *tstats = this_cpu_ptr(stats); - nf_reset(skb); - - err = ip_local_out(skb); - if (likely(net_xmit_eval(err) == 0)) { u64_stats_update_begin(&tstats->syncp); - tstats->tx_bytes += pkt_len; + tstats->tx_bytes += err; tstats->tx_packets++; u64_stats_update_end(&tstats->syncp); + } else if (err < 0) { + err_stats->tx_errors++; + err_stats->tx_aborted_errors++; } else { - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; + err_stats->tx_dropped++; } } #endif /* __NET_IP_TUNNELS_H */ diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 7fcf8101d85f..86ded0bac9c7 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -11,7 +11,7 @@ obj-y := route.o inetpeer.o protocol.o \ tcp_offload.o datagram.o raw.o udp.o udplite.o \ udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \ fib_frontend.o fib_semantics.o fib_trie.o \ - inet_fragment.o ping.o + inet_fragment.o ping.o ip_tunnel_core.o obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index e189db409b0e..a06a2ed49597 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -491,19 +491,17 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, { struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *inner_iph; - struct iphdr *iph; struct flowi4 fl4; u8 tos, ttl; __be16 df; struct rtable *rt; /* Route to the other host */ - struct net_device *tdev; /* Device to other host */ unsigned int max_headroom; /* The extra header space needed */ __be32 dst; int mtu; + int err; inner_iph = (const struct iphdr *)skb_inner_network_header(skb); - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); dst = tnl_params->daddr; if (dst == 0) { /* NBMA tunnel */ @@ -571,14 +569,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, dev->stats.tx_carrier_errors++; goto tx_error; } - tdev = rt->dst.dev; - - if (tdev == dev) { + if (rt->dst.dev == dev) { ip_rt_put(rt); dev->stats.collisions++; goto tx_error; } - df = tnl_params->frag_off; if (df) @@ -596,6 +591,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, if (!skb_is_gso(skb) && (inner_iph->frag_off&htons(IP_DF)) && mtu < ntohs(inner_iph->tot_len)) { + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); ip_rt_put(rt); goto tx_error; @@ -646,8 +642,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ttl = ip4_dst_hoplimit(&rt->dst); } - max_headroom = LL_RESERVED_SPACE(tdev) + 
sizeof(struct iphdr) - + rt->dst.header_len; + max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + + rt->dst.header_len; if (max_headroom > dev->needed_headroom) { dev->needed_headroom = max_headroom; if (skb_cow_head(skb, dev->needed_headroom)) { @@ -657,27 +653,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } } - skb_dst_drop(skb); - skb_dst_set(skb, &rt->dst); - - /* Push down and install the IP header. */ - skb_push(skb, sizeof(struct iphdr)); - skb_reset_network_header(skb); - - iph = ip_hdr(skb); - inner_iph = (const struct iphdr *)skb_inner_network_header(skb); + err = iptunnel_xmit(dev_net(dev), rt, skb, + fl4.saddr, fl4.daddr, protocol, + ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df); + iptunnel_xmit_stats(err, &dev->stats, dev->tstats); - iph->version = 4; - iph->ihl = sizeof(struct iphdr) >> 2; - iph->frag_off = df; - iph->protocol = protocol; - iph->tos = ip_tunnel_ecn_encap(tos, inner_iph, skb); - iph->daddr = fl4.daddr; - iph->saddr = fl4.saddr; - iph->ttl = ttl; - tunnel_ip_select_ident(skb, inner_iph, &rt->dst); - - iptunnel_xmit(skb, dev); return; #if IS_ENABLED(CONFIG_IPV6) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c new file mode 100644 index 000000000000..927687e83f18 --- /dev/null +++ b/net/ipv4/ip_tunnel_core.c @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2013 Nicira, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int iptunnel_xmit(struct net *net, struct rtable *rt, + struct sk_buff *skb, + __be32 src, __be32 dst, __u8 proto, + __u8 tos, __u8 ttl, __be16 df) +{ + int pkt_len = skb->len; + struct iphdr *iph; + int err; + + nf_reset(skb); + secpath_reset(skb); + skb->rxhash = 0; + skb_dst_drop(skb); + skb_dst_set(skb, &rt->dst); + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + + /* Push down and install the IP header. 
*/ + __skb_push(skb, sizeof(struct iphdr)); + skb_reset_network_header(skb); + + iph = ip_hdr(skb); + + iph->version = 4; + iph->ihl = sizeof(struct iphdr) >> 2; + iph->frag_off = df; + iph->protocol = proto; + iph->tos = tos; + iph->daddr = dst; + iph->saddr = src; + iph->ttl = ttl; + tunnel_ip_select_ident(skb, + (const struct iphdr *)skb_inner_network_header(skb), + &rt->dst); + + err = ip_local_out(skb); + if (unlikely(net_xmit_eval(err))) + pkt_len = 0; + return pkt_len; +} +EXPORT_SYMBOL_GPL(iptunnel_xmit); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 6b9c1f128eaf..76bb8de435b2 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -723,13 +723,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, __be16 df = tiph->frag_off; struct rtable *rt; /* Route to the other host */ struct net_device *tdev; /* Device to other host */ - struct iphdr *iph; /* Our new IP header */ unsigned int max_headroom; /* The extra header space needed */ __be32 dst = tiph->daddr; struct flowi4 fl4; int mtu; const struct in6_addr *addr6; int addr_type; + u8 ttl; + int err; if (skb->protocol != htons(ETH_P_IPV6)) goto tx_error; @@ -872,34 +873,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, skb = new_skb; iph6 = ipv6_hdr(skb); } - - skb->transport_header = skb->network_header; - skb_push(skb, sizeof(struct iphdr)); - skb_reset_network_header(skb); - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - IPCB(skb)->flags = 0; - skb_dst_drop(skb); - skb_dst_set(skb, &rt->dst); - - /* - * Push down and install the IPIP header. - */ - - iph = ip_hdr(skb); - iph->version = 4; - iph->ihl = sizeof(struct iphdr)>>2; - iph->frag_off = df; - iph->protocol = IPPROTO_IPV6; - iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); - iph->daddr = fl4.daddr; - iph->saddr = fl4.saddr; - - if ((iph->ttl = tiph->ttl) == 0) - iph->ttl = iph6->hop_limit; - - skb->ip_summed = CHECKSUM_NONE; - ip_select_ident(iph, skb_dst(skb), NULL); - iptunnel_xmit(skb, dev); + ttl = tiph->ttl; + if (ttl == 0) + ttl = iph6->hop_limit; + tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); + + err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, + IPPROTO_IPV6, tos, ttl, df); + iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return NETDEV_TX_OK; tx_error_icmp: -- cgit v1.2.3 From 3d7b46cd20e300bd6989fb1f43d46f1b9645816e Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 17 Jun 2013 17:50:02 -0700 Subject: ip_tunnel: push generic protocol handling to ip_tunnel module. Process skb tunnel header before sending packet to protocol handler. this allows code sharing between gre and ovs gre modules. Signed-off-by: Pravin B Shelar Signed-off-by: David S. 
Miller --- include/net/ip_tunnels.h | 1 + net/ipv4/gre.c | 3 ++- net/ipv4/ip_tunnel.c | 30 ++++++------------------------ net/ipv4/ip_tunnel_core.c | 34 ++++++++++++++++++++++++++++++++++ net/ipv4/ipip.c | 6 +++++- net/ipv6/sit.c | 7 ++++++- 6 files changed, 54 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index b84f1ab09d78..32e130b560fa 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -155,6 +155,7 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb, (skb_shinfo(skb)->gso_segs ?: 1) - 1); } +int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); int iptunnel_xmit(struct net *net, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, __u8 proto, diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c index 5ecc9c49b4dc..ba4803e609b5 100644 --- a/net/ipv4/gre.c +++ b/net/ipv4/gre.c @@ -201,7 +201,8 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, return -EINVAL; } } - return 0; + + return iptunnel_pull_header(skb, hdr_len, tpi->proto); } static int gre_cisco_rcv(struct sk_buff *skb) diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index a06a2ed49597..bd227e5ea9da 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -408,13 +408,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, const struct iphdr *iph = ip_hdr(skb); int err; - secpath_reset(skb); - - skb->protocol = tpi->proto; - - skb->mac_header = skb->network_header; - __pskb_pull(skb, tunnel->hlen); - skb_postpull_rcsum(skb, skb_transport_header(skb), tunnel->hlen); #ifdef CONFIG_NET_IPGRE_BROADCAST if (ipv4_is_multicast(iph->daddr)) { /* Looped back packet, drop it! */ @@ -442,23 +435,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, tunnel->i_seqno = ntohl(tpi->seq) + 1; } - /* Warning: All skb pointers will be invalidated! 
*/ - if (tunnel->dev->type == ARPHRD_ETHER) { - if (!pskb_may_pull(skb, ETH_HLEN)) { - tunnel->dev->stats.rx_length_errors++; - tunnel->dev->stats.rx_errors++; - goto drop; - } - - iph = ip_hdr(skb); - skb->protocol = eth_type_trans(skb, tunnel->dev); - skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); - } - - skb->pkt_type = PACKET_HOST; - __skb_tunnel_rx(skb, tunnel->dev); - - skb_reset_network_header(skb); err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { if (log_ecn_error) @@ -477,6 +453,12 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, tstats->rx_bytes += skb->len; u64_stats_update_end(&tstats->syncp); + if (tunnel->dev->type == ARPHRD_ETHER) { + skb->protocol = eth_type_trans(skb, tunnel->dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + } else { + skb->dev = tunnel->dev; + } gro_cells_receive(&tunnel->gro_cells, skb); return 0; diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 927687e83f18..7167b08977df 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -86,3 +86,37 @@ int iptunnel_xmit(struct net *net, struct rtable *rt, return pkt_len; } EXPORT_SYMBOL_GPL(iptunnel_xmit); + +int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) +{ + if (unlikely(!pskb_may_pull(skb, hdr_len))) + return -ENOMEM; + + skb_pull_rcsum(skb, hdr_len); + + if (inner_proto == htons(ETH_P_TEB)) { + struct ethhdr *eh = (struct ethhdr *)skb->data; + + if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) + return -ENOMEM; + + if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN)) + skb->protocol = eh->h_proto; + else + skb->protocol = htons(ETH_P_802_2); + + } else { + skb->protocol = inner_proto; + } + + nf_reset(skb); + secpath_reset(skb); + if (!skb->l4_rxhash) + skb->rxhash = 0; + skb_dst_drop(skb); + skb->vlan_tci = 0; + skb_set_queue_mapping(skb, 0); + skb->pkt_type = PACKET_HOST; + return 0; +} +EXPORT_SYMBOL_GPL(iptunnel_pull_header); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 9df7ecd393f2..e6905fbda2a2 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -188,8 +188,12 @@ static int ipip_rcv(struct sk_buff *skb) struct net *net = dev_net(skb->dev); struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); struct ip_tunnel *tunnel; - const struct iphdr *iph = ip_hdr(skb); + const struct iphdr *iph; + if (iptunnel_pull_header(skb, 0, tpi.proto)) + goto drop; + + iph = ip_hdr(skb); tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, iph->saddr, iph->daddr, 0); if (tunnel) { diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 76bb8de435b2..6cee844678e2 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -640,9 +640,14 @@ static const struct tnl_ptk_info tpi = { static int ipip_rcv(struct sk_buff *skb) { - const struct iphdr *iph = ip_hdr(skb); + const struct iphdr *iph; struct ip_tunnel *tunnel; + if (iptunnel_pull_header(skb, 0, tpi.proto)) + goto drop; + + iph = ip_hdr(skb); + tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, iph->saddr, iph->daddr); if (tunnel != NULL) { -- cgit v1.2.3 From 9a628224a61bbcd2b50b3ec96e661fbbb49b619a Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 17 Jun 2013 17:50:07 -0700 Subject: ip_tunnel: Add dont fragment flag. This flag will be used by ovs tunneling. Signed-off-by: Pravin B Shelar Signed-off-by: David S. 
Miller --- include/net/ip_tunnels.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 32e130b560fa..10bbb4273f7d 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -73,6 +73,7 @@ struct ip_tunnel { #define TUNNEL_REC __cpu_to_be16(0x20) #define TUNNEL_VERSION __cpu_to_be16(0x40) #define TUNNEL_NO_KEY __cpu_to_be16(0x80) +#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100) struct tnl_ptk_info { __be16 flags; -- cgit v1.2.3 From 7d5437c709ded4f152cb8b305d17972d6707f20c Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 17 Jun 2013 17:50:18 -0700 Subject: openvswitch: Add tunneling interface. Add ovs tunnel interface for set tunnel action for userspace. Signed-off-by: Pravin B Shelar Acked-by: Jesse Gross Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 18 +++++ net/openvswitch/actions.c | 4 ++ net/openvswitch/datapath.c | 78 +++++++++++++++++++++- net/openvswitch/datapath.h | 3 + net/openvswitch/flow.c | 125 +++++++++++++++++++++++++++++++++++ net/openvswitch/flow.h | 19 ++++++ net/openvswitch/vport-internal_dev.c | 2 +- net/openvswitch/vport-netdev.c | 2 +- net/openvswitch/vport.c | 4 +- net/openvswitch/vport.h | 3 +- 10 files changed, 251 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 424672db7f12..b15a445927d6 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -246,11 +246,29 @@ enum ovs_key_attr { OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */ OVS_KEY_ATTR_ND, /* struct ovs_key_nd */ OVS_KEY_ATTR_SKB_MARK, /* u32 skb mark */ + OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */ + +#ifdef __KERNEL__ + OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */ +#endif __OVS_KEY_ATTR_MAX }; #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) +enum ovs_tunnel_key_attr { + OVS_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */ + OVS_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */ + OVS_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. */ + OVS_TUNNEL_KEY_ATTR_TOS, /* u8 Tunnel IP ToS. */ + OVS_TUNNEL_KEY_ATTR_TTL, /* u8 Tunnel IP TTL. */ + OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT, /* No argument, set DF. */ + OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */ + __OVS_TUNNEL_KEY_ATTR_MAX +}; + +#define OVS_TUNNEL_KEY_ATTR_MAX (__OVS_TUNNEL_KEY_ATTR_MAX - 1) + /** * enum ovs_frag_type - IPv4 and IPv6 fragment type * @OVS_FRAG_TYPE_NONE: Packet is not a fragment. 
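For reference, the nested layout these attributes describe can be emitted with the standard netlink helpers. The sketch below is a condensed version of ovs_ipv4_tun_to_nlattr(), which this same patch adds in net/openvswitch/flow.c; it is shown here only to illustrate how the OVS_TUNNEL_KEY_ATTR_* values nest under OVS_KEY_ATTR_TUNNEL and is not itself part of the diff.

static int sketch_tun_key_to_nlattr(struct sk_buff *skb,
				    const struct ovs_key_ipv4_tunnel *tun_key)
{
	/* Open the OVS_KEY_ATTR_TUNNEL container. */
	struct nlattr *nest = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);

	if (!nest)
		return -EMSGSIZE;

	/* Emit each tunnel field as its own nested attribute. */
	if (((tun_key->tun_flags & TUNNEL_KEY) &&
	     nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id)) ||
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src) ||
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst) ||
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ipv4_tos) ||
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl) ||
	    ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	     nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) ||
	    ((tun_key->tun_flags & TUNNEL_CSUM) &&
	     nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)))
		return -EMSGSIZE;	/* skb ran out of tailroom */

	nla_nest_end(skb, nest);
	return 0;
}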
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 596d6373399d..22c5f399f1cf 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -436,6 +436,10 @@ static int execute_set_action(struct sk_buff *skb, skb->mark = nla_get_u32(nested_attr); break; + case OVS_KEY_ATTR_IPV4_TUNNEL: + OVS_CB(skb)->tun_key = nla_data(nested_attr); + break; + case OVS_KEY_ATTR_ETHERNET: err = set_eth_addr(skb, nla_data(nested_attr)); break; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f14816b80b80..bbd310646bc8 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -362,6 +362,14 @@ static int queue_gso_packets(struct net *net, int dp_ifindex, static size_t key_attr_size(void) { return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ + + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ + + nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ + + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */ + + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */ + + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ + + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */ + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ @@ -600,8 +608,30 @@ static int validate_tp_port(const struct sw_flow_key *flow_key) return -EINVAL; } +static int validate_and_copy_set_tun(const struct nlattr *attr, + struct sw_flow_actions **sfa) +{ + struct ovs_key_ipv4_tunnel tun_key; + int err, start; + + err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &tun_key); + if (err) + return err; + + start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET); + if (start < 0) + return start; + + err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key)); + add_nested_action_end(*sfa, start); + + return err; +} + static int validate_set(const struct nlattr *a, - const struct sw_flow_key *flow_key) + const struct sw_flow_key *flow_key, + struct sw_flow_actions **sfa, + bool *set_tun) { const struct nlattr *ovs_key = nla_data(a); int key_type = nla_type(ovs_key); @@ -611,18 +641,27 @@ static int validate_set(const struct nlattr *a, return -EINVAL; if (key_type > OVS_KEY_ATTR_MAX || - nla_len(ovs_key) != ovs_key_lens[key_type]) + (ovs_key_lens[key_type] != nla_len(ovs_key) && + ovs_key_lens[key_type] != -1)) return -EINVAL; switch (key_type) { const struct ovs_key_ipv4 *ipv4_key; const struct ovs_key_ipv6 *ipv6_key; + int err; case OVS_KEY_ATTR_PRIORITY: case OVS_KEY_ATTR_SKB_MARK: case OVS_KEY_ATTR_ETHERNET: break; + case OVS_KEY_ATTR_TUNNEL: + *set_tun = true; + err = validate_and_copy_set_tun(a, sfa); + if (err) + return err; + break; + case OVS_KEY_ATTR_IPV4: if (flow_key->eth.type != htons(ETH_P_IP)) return -EINVAL; @@ -771,7 +810,7 @@ static int validate_and_copy_actions(const struct nlattr *attr, break; case OVS_ACTION_ATTR_SET: - err = validate_set(a, key); + err = validate_set(a, key, sfa, &skip_copy); if (err) return err; break; @@ -993,6 +1032,33 @@ static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb) return err; } +static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) +{ + const struct nlattr *ovs_key = nla_data(a); + int key_type = nla_type(ovs_key); + struct nlattr *start; + int err; + + switch (key_type) { + case OVS_KEY_ATTR_IPV4_TUNNEL: + start = nla_nest_start(skb, OVS_ACTION_ATTR_SET); + if (!start) + 
return -EMSGSIZE; + + err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key)); + if (err) + return err; + nla_nest_end(skb, start); + break; + default: + if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key)) + return -EMSGSIZE; + break; + } + + return 0; +} + static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb) { const struct nlattr *a; @@ -1002,6 +1068,12 @@ static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *s int type = nla_type(a); switch (type) { + case OVS_ACTION_ATTR_SET: + err = set_action_to_attr(a, skb); + if (err) + return err; + break; + case OVS_ACTION_ATTR_SAMPLE: err = sample_action_to_attr(a, skb); if (err) diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 16b840695216..e88ebc2f1c54 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -88,9 +88,12 @@ struct datapath { /** * struct ovs_skb_cb - OVS data in skb CB * @flow: The flow associated with this packet. May be %NULL if no flow. + * @tun_key: Key for the tunnel that encapsulated this packet. NULL if the + * packet is not being tunneled. */ struct ovs_skb_cb { struct sw_flow *flow; + struct ovs_key_ipv4_tunnel *tun_key; }; #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 940d4b803ff5..976a8b766a6a 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -603,6 +604,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, memset(key, 0, sizeof(*key)); key->phy.priority = skb->priority; + if (OVS_CB(skb)->tun_key) + memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key)); key->phy.in_port = in_port; key->phy.skb_mark = skb->mark; @@ -818,6 +821,7 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), + [OVS_KEY_ATTR_TUNNEL] = -1, }; static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len, @@ -955,6 +959,105 @@ static int parse_flow_nlattrs(const struct nlattr *attr, return 0; } +int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, + struct ovs_key_ipv4_tunnel *tun_key) +{ + struct nlattr *a; + int rem; + bool ttl = false; + + memset(tun_key, 0, sizeof(*tun_key)); + + nla_for_each_nested(a, attr, rem) { + int type = nla_type(a); + static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { + [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64), + [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32), + [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32), + [OVS_TUNNEL_KEY_ATTR_TOS] = 1, + [OVS_TUNNEL_KEY_ATTR_TTL] = 1, + [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0, + [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, + }; + + if (type > OVS_TUNNEL_KEY_ATTR_MAX || + ovs_tunnel_key_lens[type] != nla_len(a)) + return -EINVAL; + + switch (type) { + case OVS_TUNNEL_KEY_ATTR_ID: + tun_key->tun_id = nla_get_be64(a); + tun_key->tun_flags |= TUNNEL_KEY; + break; + case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: + tun_key->ipv4_src = nla_get_be32(a); + break; + case OVS_TUNNEL_KEY_ATTR_IPV4_DST: + tun_key->ipv4_dst = nla_get_be32(a); + break; + case OVS_TUNNEL_KEY_ATTR_TOS: + tun_key->ipv4_tos = nla_get_u8(a); + break; + case OVS_TUNNEL_KEY_ATTR_TTL: + tun_key->ipv4_ttl = nla_get_u8(a); + ttl = true; + break; + case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: + tun_key->tun_flags |= TUNNEL_DONT_FRAGMENT; + break; + case 
OVS_TUNNEL_KEY_ATTR_CSUM: + tun_key->tun_flags |= TUNNEL_CSUM; + break; + default: + return -EINVAL; + + } + } + if (rem > 0) + return -EINVAL; + + if (!tun_key->ipv4_dst) + return -EINVAL; + + if (!ttl) + return -EINVAL; + + return 0; +} + +int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, + const struct ovs_key_ipv4_tunnel *tun_key) +{ + struct nlattr *nla; + + nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL); + if (!nla) + return -EMSGSIZE; + + if (tun_key->tun_flags & TUNNEL_KEY && + nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id)) + return -EMSGSIZE; + if (tun_key->ipv4_src && + nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src)) + return -EMSGSIZE; + if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst)) + return -EMSGSIZE; + if (tun_key->ipv4_tos && + nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ipv4_tos)) + return -EMSGSIZE; + if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl)) + return -EMSGSIZE; + if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) && + nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) + return -EMSGSIZE; + if ((tun_key->tun_flags & TUNNEL_CSUM) && + nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) + return -EMSGSIZE; + + nla_nest_end(skb, nla); + return 0; +} + /** * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key. * @swkey: receives the extracted flow key. @@ -997,6 +1100,14 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); } + if (attrs & (1 << OVS_KEY_ATTR_TUNNEL)) { + err = ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key); + if (err) + return err; + + attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL); + } + /* Data attributes. */ if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET))) return -EINVAL; @@ -1135,17 +1246,21 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, const struct nlattr *attr) { + struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; const struct nlattr *nla; int rem; flow->key.phy.in_port = DP_MAX_PORTS; flow->key.phy.priority = 0; flow->key.phy.skb_mark = 0; + memset(tun_key, 0, sizeof(flow->key.tun_key)); nla_for_each_nested(nla, attr, rem) { int type = nla_type(nla); if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) { + int err; + if (nla_len(nla) != ovs_key_lens[type]) return -EINVAL; @@ -1154,6 +1269,12 @@ int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, flow->key.phy.priority = nla_get_u32(nla); break; + case OVS_KEY_ATTR_TUNNEL: + err = ovs_ipv4_tun_from_nlattr(nla, tun_key); + if (err) + return err; + break; + case OVS_KEY_ATTR_IN_PORT: if (nla_get_u32(nla) >= DP_MAX_PORTS) return -EINVAL; @@ -1180,6 +1301,10 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority)) goto nla_put_failure; + if (swkey->tun_key.ipv4_dst && + ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key)) + goto nla_put_failure; + if (swkey->phy.in_port != DP_MAX_PORTS && nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) goto nla_put_failure; diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index e370f6246ee9..aec5e43f690b 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -40,7 +40,22 @@ struct sw_flow_actions { struct nlattr actions[]; }; +/* Used to memset ovs_key_ipv4_tunnel padding. 
*/ +#define OVS_TUNNEL_KEY_SIZE \ + (offsetof(struct ovs_key_ipv4_tunnel, ipv4_ttl) + \ + FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, ipv4_ttl)) + +struct ovs_key_ipv4_tunnel { + __be64 tun_id; + __be32 ipv4_src; + __be32 ipv4_dst; + u16 tun_flags; + u8 ipv4_tos; + u8 ipv4_ttl; +}; + struct sw_flow_key { + struct ovs_key_ipv4_tunnel tun_key; /* Encapsulating tunnel key. */ struct { u32 priority; /* Packet QoS priority. */ u32 skb_mark; /* SKB mark. */ @@ -179,5 +194,9 @@ u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len); struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx); extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1]; +int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, + struct ovs_key_ipv4_tunnel *tun_key); +int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, + const struct ovs_key_ipv4_tunnel *tun_key); #endif /* flow.h */ diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index e284c7e1fec4..98d3edbbc235 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -67,7 +67,7 @@ static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netde static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) { rcu_read_lock(); - ovs_vport_receive(internal_dev_priv(netdev)->vport, skb); + ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL); rcu_read_unlock(); return 0; } diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 40de815b4213..5982f3f62835 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -51,7 +51,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) skb_push(skb, ETH_HLEN); ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN); - ovs_vport_receive(vport, skb); + ovs_vport_receive(vport, skb, NULL); return; error: diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 176d449351eb..413287a1877f 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -325,7 +325,8 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) * Must be called with rcu_read_lock. The packet cannot be shared and * skb->data should point to the Ethernet header. */ -void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) +void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, + struct ovs_key_ipv4_tunnel *tun_key) { struct pcpu_tstats *stats; @@ -335,6 +336,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); + OVS_CB(skb)->tun_key = tun_key; ovs_dp_process_received_packet(vport, skb); } diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 293278c4c2df..2d961aedd71d 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -184,7 +184,8 @@ static inline struct vport *vport_from_priv(const void *priv) return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN)); } -void ovs_vport_receive(struct vport *, struct sk_buff *); +void ovs_vport_receive(struct vport *, struct sk_buff *, + struct ovs_key_ipv4_tunnel *); void ovs_vport_record_error(struct vport *, enum vport_err_type err_type); /* List of statically compiled vport implementations. Don't forget to also -- cgit v1.2.3 From aa310701e787087dbfbccf1409982a96e16c57a6 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 17 Jun 2013 17:50:33 -0700 Subject: openvswitch: Add gre tunnel support. Add gre vport implementation. 
Most of gre protocol processing is pushed to gre module. It make use of gre demultiplexer therefore it can co-exist with linux device based gre tunnels. Signed-off-by: Pravin B Shelar Acked-by: Jesse Gross Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 1 + net/openvswitch/Kconfig | 2 + net/openvswitch/Makefile | 3 +- net/openvswitch/datapath.h | 1 + net/openvswitch/flow.h | 18 ++- net/openvswitch/vport-gre.c | 274 +++++++++++++++++++++++++++++++++++++++ net/openvswitch/vport.c | 19 +++ net/openvswitch/vport.h | 7 + 8 files changed, 323 insertions(+), 2 deletions(-) create mode 100644 net/openvswitch/vport-gre.c (limited to 'include') diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index b15a445927d6..c55efaaa9bb4 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -164,6 +164,7 @@ enum ovs_vport_type { OVS_VPORT_TYPE_UNSPEC, OVS_VPORT_TYPE_NETDEV, /* network device */ OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ + OVS_VPORT_TYPE_GRE, /* GRE tunnel. */ __OVS_VPORT_TYPE_MAX }; diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index d9ea33c361be..9fbc04a31ed6 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -19,6 +19,8 @@ config OPENVSWITCH which is able to accept configuration from a variety of sources and translate it into packet processing rules. + Open vSwitch GRE support depends on CONFIG_NET_IPGRE_DEMUX. + See http://openvswitch.org for more information and userspace utilities. diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile index 15e7384745c1..01bddb2991e3 100644 --- a/net/openvswitch/Makefile +++ b/net/openvswitch/Makefile @@ -10,5 +10,6 @@ openvswitch-y := \ dp_notify.o \ flow.o \ vport.o \ + vport-gre.o \ vport-internal_dev.o \ - vport-netdev.o \ + vport-netdev.o diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index e88ebc2f1c54..a91486484916 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -122,6 +122,7 @@ struct dp_upcall_info { struct ovs_net { struct list_head dps; struct work_struct dp_notify_work; + struct vport_net vport_net; }; extern int ovs_net_id; diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 999842f247a0..66ef7220293e 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -49,11 +49,27 @@ struct ovs_key_ipv4_tunnel { __be64 tun_id; __be32 ipv4_src; __be32 ipv4_dst; - u16 tun_flags; + __be16 tun_flags; u8 ipv4_tos; u8 ipv4_ttl; }; +static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key, + const struct iphdr *iph, __be64 tun_id, + __be16 tun_flags) +{ + tun_key->tun_id = tun_id; + tun_key->ipv4_src = iph->saddr; + tun_key->ipv4_dst = iph->daddr; + tun_key->ipv4_tos = iph->tos; + tun_key->ipv4_ttl = iph->ttl; + tun_key->tun_flags = tun_flags; + + /* clear struct padding. */ + memset((unsigned char *) tun_key + OVS_TUNNEL_KEY_SIZE, 0, + sizeof(*tun_key) - OVS_TUNNEL_KEY_SIZE); +} + struct sw_flow_key { struct ovs_key_ipv4_tunnel tun_key; /* Encapsulating tunnel key. */ struct { diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c new file mode 100644 index 000000000000..3a8d1900aa78 --- /dev/null +++ b/net/openvswitch/vport-gre.c @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2007-2013 Nicira, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + */ + +#ifdef CONFIG_NET_IPGRE_DEMUX +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "datapath.h" +#include "vport.h" + +/* Returns the least-significant 32 bits of a __be64. */ +static __be32 be64_get_low32(__be64 x) +{ +#ifdef __BIG_ENDIAN + return (__force __be32)x; +#else + return (__force __be32)((__force u64)x >> 32); +#endif +} + +static __be16 filter_tnl_flags(__be16 flags) +{ + return flags & (TUNNEL_CSUM | TUNNEL_KEY); +} + +static struct sk_buff *__build_header(struct sk_buff *skb, + int tunnel_hlen) +{ + const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key; + struct tnl_ptk_info tpi; + + skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM)); + if (IS_ERR(skb)) + return NULL; + + tpi.flags = filter_tnl_flags(tun_key->tun_flags); + tpi.proto = htons(ETH_P_TEB); + tpi.key = be64_get_low32(tun_key->tun_id); + tpi.seq = 0; + gre_build_header(skb, &tpi, tunnel_hlen); + + return skb; +} + +static __be64 key_to_tunnel_id(__be32 key, __be32 seq) +{ +#ifdef __BIG_ENDIAN + return (__force __be64)((__force u64)seq << 32 | (__force u32)key); +#else + return (__force __be64)((__force u64)key << 32 | (__force u32)seq); +#endif +} + +/* Called with rcu_read_lock and BH disabled. */ +static int gre_rcv(struct sk_buff *skb, + const struct tnl_ptk_info *tpi) +{ + struct ovs_key_ipv4_tunnel tun_key; + struct ovs_net *ovs_net; + struct vport *vport; + __be64 key; + + ovs_net = net_generic(dev_net(skb->dev), ovs_net_id); + vport = rcu_dereference(ovs_net->vport_net.gre_vport); + if (unlikely(!vport)) + return PACKET_REJECT; + + key = key_to_tunnel_id(tpi->key, tpi->seq); + ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key, + filter_tnl_flags(tpi->flags)); + + ovs_vport_receive(vport, skb, &tun_key); + return PACKET_RCVD; +} + +static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) +{ + struct net *net = ovs_dp_get_net(vport->dp); + struct flowi4 fl; + struct rtable *rt; + int min_headroom; + int tunnel_hlen; + __be16 df; + int err; + + if (unlikely(!OVS_CB(skb)->tun_key)) { + err = -EINVAL; + goto error; + } + + /* Route lookup */ + memset(&fl, 0, sizeof(fl)); + fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst; + fl.saddr = OVS_CB(skb)->tun_key->ipv4_src; + fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos); + fl.flowi4_mark = skb->mark; + fl.flowi4_proto = IPPROTO_GRE; + + rt = ip_route_output_key(net, &fl); + if (IS_ERR(rt)) + return PTR_ERR(rt); + + tunnel_hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags); + + min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + + tunnel_hlen + sizeof(struct iphdr) + + (vlan_tx_tag_present(skb) ? 
VLAN_HLEN : 0); + if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) { + int head_delta = SKB_DATA_ALIGN(min_headroom - + skb_headroom(skb) + + 16); + err = pskb_expand_head(skb, max_t(int, head_delta, 0), + 0, GFP_ATOMIC); + if (unlikely(err)) + goto err_free_rt; + } + + if (vlan_tx_tag_present(skb)) { + if (unlikely(!__vlan_put_tag(skb, + skb->vlan_proto, + vlan_tx_tag_get(skb)))) { + err = -ENOMEM; + goto err_free_rt; + } + skb->vlan_tci = 0; + } + + /* Push Tunnel header. */ + skb = __build_header(skb, tunnel_hlen); + if (unlikely(!skb)) { + err = 0; + goto err_free_rt; + } + + df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? + htons(IP_DF) : 0; + + skb->local_df = 1; + + return iptunnel_xmit(net, rt, skb, fl.saddr, + OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, + OVS_CB(skb)->tun_key->ipv4_tos, + OVS_CB(skb)->tun_key->ipv4_ttl, df); +err_free_rt: + ip_rt_put(rt); +error: + return err; +} + +static struct gre_cisco_protocol gre_protocol = { + .handler = gre_rcv, + .priority = 1, +}; + +static int gre_ports; +static int gre_init(void) +{ + int err; + + gre_ports++; + if (gre_ports > 1) + return 0; + + err = gre_cisco_register(&gre_protocol); + if (err) + pr_warn("cannot register gre protocol handler\n"); + + return err; +} + +static void gre_exit(void) +{ + gre_ports--; + if (gre_ports > 0) + return; + + gre_cisco_unregister(&gre_protocol); +} + +static const char *gre_get_name(const struct vport *vport) +{ + return vport_priv(vport); +} + +static struct vport *gre_create(const struct vport_parms *parms) +{ + struct net *net = ovs_dp_get_net(parms->dp); + struct ovs_net *ovs_net; + struct vport *vport; + int err; + + err = gre_init(); + if (err) + return ERR_PTR(err); + + ovs_net = net_generic(net, ovs_net_id); + if (ovsl_dereference(ovs_net->vport_net.gre_vport)) { + vport = ERR_PTR(-EEXIST); + goto error; + } + + vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms); + if (IS_ERR(vport)) + goto error; + + strncpy(vport_priv(vport), parms->name, IFNAMSIZ); + rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport); + return vport; + +error: + gre_exit(); + return vport; +} + +static void gre_tnl_destroy(struct vport *vport) +{ + struct net *net = ovs_dp_get_net(vport->dp); + struct ovs_net *ovs_net; + + ovs_net = net_generic(net, ovs_net_id); + + rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL); + ovs_vport_deferred_free(vport); + gre_exit(); +} + +const struct vport_ops ovs_gre_vport_ops = { + .type = OVS_VPORT_TYPE_GRE, + .create = gre_create, + .destroy = gre_tnl_destroy, + .get_name = gre_get_name, + .send = gre_tnl_send, +}; +#endif diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 413287a1877f..f52dfb9cb5a7 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -38,6 +38,10 @@ static const struct vport_ops *vport_ops_list[] = { &ovs_netdev_vport_ops, &ovs_internal_vport_ops, + +#ifdef CONFIG_NET_IPGRE_DEMUX + &ovs_gre_vport_ops, +#endif }; /* Protected by RCU read lock for reading, ovs_mutex for writing. 
*/ @@ -404,3 +408,18 @@ void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type) spin_unlock(&vport->stats_lock); } + +static void free_vport_rcu(struct rcu_head *rcu) +{ + struct vport *vport = container_of(rcu, struct vport, rcu); + + ovs_vport_free(vport); +} + +void ovs_vport_deferred_free(struct vport *vport) +{ + if (!vport) + return; + + call_rcu(&vport->rcu, free_vport_rcu); +} diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 2d961aedd71d..376045c42f8b 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -34,6 +34,11 @@ struct vport_parms; /* The following definitions are for users of the vport subsytem: */ +/* The following definitions are for users of the vport subsytem: */ +struct vport_net { + struct vport __rcu *gre_vport; +}; + int ovs_vport_init(void); void ovs_vport_exit(void); @@ -152,6 +157,7 @@ enum vport_err_type { struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *, const struct vport_parms *); void ovs_vport_free(struct vport *); +void ovs_vport_deferred_free(struct vport *vport); #define VPORT_ALIGN 8 @@ -192,6 +198,7 @@ void ovs_vport_record_error(struct vport *, enum vport_err_type err_type); * add yours to the list at the top of vport.c. */ extern const struct vport_ops ovs_netdev_vport_ops; extern const struct vport_ops ovs_internal_vport_ops; +extern const struct vport_ops ovs_gre_vport_ops; static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len) -- cgit v1.2.3 From eea86af6b1e18d6fa8dc959e3ddc0100f27aff9f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 19 Jun 2013 12:51:20 +0200 Subject: net: sock: adapt SOCK_MIN_RCVBUF and SOCK_MIN_SNDBUF The current situation is that SOCK_MIN_RCVBUF is 2048 + sizeof(struct sk_buff)) while SOCK_MIN_SNDBUF is 2048. Since in both cases, skb->truesize is used for sk_{r,w}mem_alloc accounting, we should have both sizes adjusted via defining a TCP_SKB_MIN_TRUESIZE. Further, as Eric Dumazet points out, the minimal skb truesize in transmit path is SKB_TRUESIZE(2048) after commit f07d960df33c5 ("tcp: avoid frag allocation for small frames"), and tcp_sendmsg() tries to limit skb size to half the congestion window, meaning we try to build two skbs at minimum. Thus, having SOCK_MIN_SNDBUF as 2048 can hit a small regression for some applications setting to low SO_SNDBUF / SO_RCVBUF. Note that we define a TCP_SKB_MIN_TRUESIZE, because SKB_TRUESIZE(2048) adds SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), but in case of TCP skbs, the skb_shared_info is part of the 2048 bytes allocation for skb->head. The minor adaption in sk_stream_moderate_sndbuf() is to silence a warning by using a typed max macro, as similarly done in SOCK_MIN_RCVBUF occurences, that would appear otherwise. Suggested-by: Eric Dumazet Signed-off-by: Daniel Borkmann Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/sock.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index 21db792bffa5..ea6206ccc896 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2047,18 +2047,21 @@ static inline void sk_wake_async(struct sock *sk, int how, int band) sock_wake_async(sk->sk_socket, how, band); } -#define SOCK_MIN_SNDBUF 2048 -/* - * Since sk_rmem_alloc sums skb->truesize, even a small frame might need - * sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak +/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might + * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak. + * Note: for send buffers, TCP works better if we can build two skbs at + * minimum. */ -#define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff)) +#define TCP_SKB_MIN_TRUESIZE (2048 + sizeof(struct sk_buff)) + +#define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2) +#define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE static inline void sk_stream_moderate_sndbuf(struct sock *sk) { if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); - sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); + sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF); } } -- cgit v1.2.3 From bcefe17cffd06efdda3e7ad679ea743236e6271a Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 15 Jun 2013 09:39:18 +0800 Subject: tcp: introduce a per-route knob for quick ack In previous discussions, I tried to find some reasonable heuristics for delayed ACK, however this seems not possible, according to Eric: "ACKS might also be delayed because of bidirectional traffic, and is more controlled by the application response time. TCP stack can not easily estimate it." "ACK can be incredibly useful to recover from losses in a short time. The vast majority of TCP sessions are small lived, and we send one ACK per received segment anyway at beginning or retransmits to let the sender smoothly increase its cwnd, so an auto-tuning facility wont help them that much." and according to David: "ACKs are the only information we have to detect loss. And, for the same reasons that TCP VEGAS is fundamentally broken, we cannot measure the pipe or some other receiver-side-visible piece of information to determine when it's "safe" to stretch ACK. And even if it's "safe", we should not do it so that losses are accurately detected and we don't spuriously retransmit. The only way to know when the bandwidth increases is to "test" it, by sending more and more packets until drops happen. That's why all successful congestion control algorithms must operate on explicited tested pieces of information. Similarly, it's not really possible to universally know if it's safe to stretch ACK or not." It still makes sense to enable or disable quick ack mode like what TCP_QUICK_ACK does. Similar to TCP_QUICK_ACK option, but for people who can't modify the source code and still wants to control TCP delayed ACK behavior. As David suggested, this should belong to per-path scope, since different pathes may want different behaviors. Cc: Eric Dumazet Cc: Rick Jones Cc: Stephen Hemminger Cc: "David S. Miller" Cc: Thomas Graf CC: David Laight Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- include/uapi/linux/rtnetlink.h | 2 ++ net/ipv4/tcp_input.c | 5 ++++- net/ipv4/tcp_output.c | 6 ++++-- 3 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 7a2144e1afae..eb0f1a554d7b 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -386,6 +386,8 @@ enum { #define RTAX_RTO_MIN RTAX_RTO_MIN RTAX_INITRWND, #define RTAX_INITRWND RTAX_INITRWND + RTAX_QUICKACK, +#define RTAX_QUICKACK RTAX_QUICKACK __RTAX_MAX }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 46271cdcf088..28af45abe062 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3717,6 +3717,7 @@ void tcp_reset(struct sock *sk) static void tcp_fin(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); + const struct dst_entry *dst; inet_csk_schedule_ack(sk); @@ -3728,7 +3729,9 @@ static void tcp_fin(struct sock *sk) case TCP_ESTABLISHED: /* Move to CLOSE_WAIT */ tcp_set_state(sk, TCP_CLOSE_WAIT); - inet_csk(sk)->icsk_ack.pingpong = 1; + dst = __sk_dst_get(sk); + if (!dst || !dst_metric(dst, RTAX_QUICKACK)) + inet_csk(sk)->icsk_ack.pingpong = 1; break; case TCP_CLOSE_WAIT: diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e2c1333ee318..3d609490f118 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -160,6 +160,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp, { struct inet_connection_sock *icsk = inet_csk(sk); const u32 now = tcp_time_stamp; + const struct dst_entry *dst = __sk_dst_get(sk); if (sysctl_tcp_slow_start_after_idle && (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)) @@ -170,8 +171,9 @@ static void tcp_event_data_sent(struct tcp_sock *tp, /* If it is a reply for ato after last received * packet, enter pingpong mode. */ - if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) - icsk->icsk_ack.pingpong = 1; + if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato && + (!dst || !dst_metric(dst, RTAX_QUICKACK))) + icsk->icsk_ack.pingpong = 1; } /* Account for an ACK we sent. */ -- cgit v1.2.3 From 9e8cda3ba84e1b85ff11b5d31ac80a753e7f9547 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 13 Jun 2013 19:37:53 -0700 Subject: ipv6: Convert use of typedef ctl_table to struct ctl_table This typedef is unnecessary and should just be removed. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- include/net/ipv6.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/ipv6.h b/include/net/ipv6.h index ab47582f6c0b..5fe564985171 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -859,8 +859,8 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } #endif #ifdef CONFIG_SYSCTL -extern ctl_table ipv6_route_table_template[]; -extern ctl_table ipv6_icmp_table_template[]; +extern struct ctl_table ipv6_route_table_template[]; +extern struct ctl_table ipv6_icmp_table_template[]; extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); extern struct ctl_table *ipv6_route_sysctl_init(struct net *net); -- cgit v1.2.3 From fedaf4ffc224a194e2d13a3ec2abe5df0bc94258 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 13 Jun 2013 19:37:54 -0700 Subject: ndisc: Convert use of typedef ctl_table to struct ctl_table This typedef is unnecessary and should just be removed. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- include/net/ndisc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 745bf741e029..949d77528f2f 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -230,7 +230,7 @@ extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, void __user *buffer, size_t *lenp, loff_t *ppos); -int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, +int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen); #endif -- cgit v1.2.3 From 681f130f39e10087475383e6771b9366e26bab0c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 20 Jun 2013 05:52:22 -0700 Subject: netfilter: xt_socket: add XT_SOCKET_NOWILDCARD flag xt_socket module can be a nice replacement to conntrack module in some cases (SYN filtering for example) But it lacks the ability to match the 3rd packet of TCP handshake (ACK coming from the client). Add a XT_SOCKET_NOWILDCARD flag to disable the wildcard mechanism. The wildcard is the legacy socket match behavior, that ignores LISTEN sockets bound to INADDR_ANY (or ipv6 equivalent) iptables -I INPUT -p tcp --syn -j SYN_CHAIN iptables -I INPUT -m socket --nowildcard -j ACCEPT Signed-off-by: Eric Dumazet Cc: Patrick McHardy Cc: Jesper Dangaard Brouer Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/xt_socket.h | 7 ++++ net/netfilter/xt_socket.c | 70 ++++++++++++++++++++++++++++---- 2 files changed, 69 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/netfilter/xt_socket.h b/include/uapi/linux/netfilter/xt_socket.h index 26d7217bd4f1..6315e2ac3474 100644 --- a/include/uapi/linux/netfilter/xt_socket.h +++ b/include/uapi/linux/netfilter/xt_socket.h @@ -5,10 +5,17 @@ enum { XT_SOCKET_TRANSPARENT = 1 << 0, + XT_SOCKET_NOWILDCARD = 1 << 1, }; struct xt_socket_mtinfo1 { __u8 flags; }; +#define XT_SOCKET_FLAGS_V1 XT_SOCKET_TRANSPARENT + +struct xt_socket_mtinfo2 { + __u8 flags; +}; +#define XT_SOCKET_FLAGS_V2 (XT_SOCKET_TRANSPARENT | XT_SOCKET_NOWILDCARD) #endif /* _XT_SOCKET_H */ diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 02704245710e..f8b71911037a 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -163,8 +163,11 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, bool wildcard; bool transparent = true; - /* Ignore sockets listening on INADDR_ANY */ - wildcard = (sk->sk_state != TCP_TIME_WAIT && + /* Ignore sockets listening on INADDR_ANY, + * unless XT_SOCKET_NOWILDCARD is set + */ + wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) && + sk->sk_state != TCP_TIME_WAIT && inet_sk(sk)->inet_rcv_saddr == 0); /* Ignore non-transparent sockets, @@ -197,7 +200,7 @@ socket_mt4_v0(const struct sk_buff *skb, struct xt_action_param *par) } static bool -socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par) +socket_mt4_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) { return socket_match(skb, par, par->matchinfo); } @@ -259,7 +262,7 @@ extract_icmp6_fields(const struct sk_buff *skb, } static bool -socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) +socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) { struct ipv6hdr *iph = ipv6_hdr(skb); struct udphdr _hdr, *hp = NULL; @@ -302,8 +305,11 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) bool wildcard; bool transparent = true; - /* Ignore sockets listening on INADDR_ANY */ - 
wildcard = (sk->sk_state != TCP_TIME_WAIT && + /* Ignore sockets listening on INADDR_ANY + * unless XT_SOCKET_NOWILDCARD is set + */ + wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) && + sk->sk_state != TCP_TIME_WAIT && ipv6_addr_any(&inet6_sk(sk)->rcv_saddr)); /* Ignore non-transparent sockets, @@ -331,6 +337,28 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) } #endif +static int socket_mt_v1_check(const struct xt_mtchk_param *par) +{ + const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; + + if (info->flags & ~XT_SOCKET_FLAGS_V1) { + pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V1); + return -EINVAL; + } + return 0; +} + +static int socket_mt_v2_check(const struct xt_mtchk_param *par) +{ + const struct xt_socket_mtinfo2 *info = (struct xt_socket_mtinfo2 *) par->matchinfo; + + if (info->flags & ~XT_SOCKET_FLAGS_V2) { + pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V2); + return -EINVAL; + } + return 0; +} + static struct xt_match socket_mt_reg[] __read_mostly = { { .name = "socket", @@ -345,7 +373,8 @@ static struct xt_match socket_mt_reg[] __read_mostly = { .name = "socket", .revision = 1, .family = NFPROTO_IPV4, - .match = socket_mt4_v1, + .match = socket_mt4_v1_v2, + .checkentry = socket_mt_v1_check, .matchsize = sizeof(struct xt_socket_mtinfo1), .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN), @@ -356,7 +385,32 @@ static struct xt_match socket_mt_reg[] __read_mostly = { .name = "socket", .revision = 1, .family = NFPROTO_IPV6, - .match = socket_mt6_v1, + .match = socket_mt6_v1_v2, + .checkentry = socket_mt_v1_check, + .matchsize = sizeof(struct xt_socket_mtinfo1), + .hooks = (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_IN), + .me = THIS_MODULE, + }, +#endif + { + .name = "socket", + .revision = 2, + .family = NFPROTO_IPV4, + .match = socket_mt4_v1_v2, + .checkentry = socket_mt_v2_check, + .matchsize = sizeof(struct xt_socket_mtinfo1), + .hooks = (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_IN), + .me = THIS_MODULE, + }, +#ifdef XT_SOCKET_HAVE_IPV6 + { + .name = "socket", + .revision = 2, + .family = NFPROTO_IPV6, + .match = socket_mt6_v1_v2, + .checkentry = socket_mt_v2_check, .matchsize = sizeof(struct xt_socket_mtinfo1), .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN), -- cgit v1.2.3 From 073d1cf35fe45d89f5a553c21eea18b504dd6937 Mon Sep 17 00:00:00 2001 From: Johan Hedberg Date: Mon, 29 Apr 2013 19:35:35 +0300 Subject: Bluetooth: Rename L2CAP_CID_LE_DATA to L2CAP_CID_ATT In future Core Specification versions the ATT CID will be just one of many possible CIDs that can be used for data transfer. Therefore, it makes sense to rename the define for the ATT CID to something less ambigous. 
Signed-off-by: Johan Hedberg Acked-by: Marcel Holtmann Signed-off-by: Gustavo Padovan --- include/net/bluetooth/l2cap.h | 2 +- net/bluetooth/l2cap_core.c | 14 +++++++------- net/bluetooth/l2cap_sock.c | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index fb94cf13c777..1a966afbbfa8 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -242,7 +242,7 @@ struct l2cap_conn_rsp { #define L2CAP_CID_SIGNALING 0x0001 #define L2CAP_CID_CONN_LESS 0x0002 #define L2CAP_CID_A2MP 0x0003 -#define L2CAP_CID_LE_DATA 0x0004 +#define L2CAP_CID_ATT 0x0004 #define L2CAP_CID_LE_SIGNALING 0x0005 #define L2CAP_CID_SMP 0x0006 #define L2CAP_CID_DYN_START 0x0040 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index ab9961118181..c87f8f94083d 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -504,8 +504,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) if (conn->hcon->type == LE_LINK) { /* LE connection */ chan->omtu = L2CAP_DEFAULT_MTU; - chan->scid = L2CAP_CID_LE_DATA; - chan->dcid = L2CAP_CID_LE_DATA; + chan->scid = L2CAP_CID_ATT; + chan->dcid = L2CAP_CID_ATT; } else { /* Alloc CID for connection-oriented socket */ chan->scid = l2cap_alloc_cid(conn); @@ -1344,7 +1344,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) BT_DBG(""); /* Check if we have socket listening on cid */ - pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA, + pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT, conn->src, conn->dst); if (!pchan) return; @@ -1792,7 +1792,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, auth_type = l2cap_get_auth_type(chan); - if (chan->dcid == L2CAP_CID_LE_DATA) + if (chan->dcid == L2CAP_CID_ATT) hcon = hci_connect(hdev, LE_LINK, dst, dst_type, chan->sec_level, auth_type); else @@ -6397,7 +6397,7 @@ static void l2cap_att_channel(struct l2cap_conn *conn, { struct l2cap_chan *chan; - chan = l2cap_global_chan_by_scid(0, L2CAP_CID_LE_DATA, + chan = l2cap_global_chan_by_scid(0, L2CAP_CID_ATT, conn->src, conn->dst); if (!chan) goto drop; @@ -6448,7 +6448,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) l2cap_conless_channel(conn, psm, skb); break; - case L2CAP_CID_LE_DATA: + case L2CAP_CID_ATT: l2cap_att_channel(conn, skb); break; @@ -6574,7 +6574,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) continue; } - if (chan->scid == L2CAP_CID_LE_DATA) { + if (chan->scid == L2CAP_CID_ATT) { if (!status && encrypt) { chan->sec_level = hcon->sec_level; l2cap_chan_ready(chan); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 36fed40c162c..0098af80b213 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -466,7 +466,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) { switch (chan->scid) { - case L2CAP_CID_LE_DATA: + case L2CAP_CID_ATT: if (mtu < L2CAP_LE_MIN_MTU) return false; break; @@ -630,7 +630,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, conn = chan->conn; /*change security for LE channels */ - if (chan->scid == L2CAP_CID_LE_DATA) { + if (chan->scid == L2CAP_CID_ATT) { if (!conn->hcon->out) { err = -EINVAL; break; -- cgit v1.2.3 From 1f9b9a5dc5bb8ee360db9d32b2090aac497ae82a Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Tue, 
30 Apr 2013 15:29:27 -0300 Subject: Bluetooth: Make inquiry_cache_flush non-static In order to use HCI request framework in start_discovery, we'll need to call inquiry_cache_flush in mgmt.c. Therefore, this patch adds the hci_ prefix to inquiry_cache_flush and makes it non-static. Signed-off-by: Andre Guedes Acked-by: Johan Hedberg Signed-off-by: Gustavo Padovan --- include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 7cb6d360d147..7c150e4d65b9 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -432,6 +432,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, struct inquiry_entry *ie); bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, bool name_known, bool *ssp); +void hci_inquiry_cache_flush(struct hci_dev *hdev); /* ----- HCI Connections ----- */ enum { diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index ace5e55fe5a3..43c63877c5b7 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -751,7 +751,7 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) hdev->discovery.state = state; } -static void inquiry_cache_flush(struct hci_dev *hdev) +void hci_inquiry_cache_flush(struct hci_dev *hdev) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *p, *n; @@ -964,7 +964,7 @@ int hci_inquiry(void __user *arg) hci_dev_lock(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { - inquiry_cache_flush(hdev); + hci_inquiry_cache_flush(hdev); do_inquiry = 1; } hci_dev_unlock(hdev); @@ -1230,7 +1230,7 @@ static int hci_dev_do_close(struct hci_dev *hdev) cancel_delayed_work_sync(&hdev->le_scan_disable); hci_dev_lock(hdev); - inquiry_cache_flush(hdev); + hci_inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); hci_dev_unlock(hdev); @@ -1331,7 +1331,7 @@ int hci_dev_reset(__u16 dev) skb_queue_purge(&hdev->cmd_q); hci_dev_lock(hdev); - inquiry_cache_flush(hdev); + hci_inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); hci_dev_unlock(hdev); @@ -3562,7 +3562,7 @@ int hci_do_inquiry(struct hci_dev *hdev, u8 length) if (test_bit(HCI_INQUIRY, &hdev->flags)) return -EINPROGRESS; - inquiry_cache_flush(hdev); + hci_inquiry_cache_flush(hdev); memset(&cp, 0, sizeof(cp)); memcpy(&cp.lap, lap, sizeof(cp.lap)); -- cgit v1.2.3 From 41dc2bd6d13bfccc34d05586be2eb65876a5990a Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Tue, 30 Apr 2013 15:29:30 -0300 Subject: Bluetooth: Make mgmt_start_discovery_failed static mgmt_start_discovery_failed is now only used in mgmt.c so we can make it a local function. This patch also moves the mgmt_start_ discovery_failed definition up in mgmt.c to avoid forward declaration. 
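The expected call pattern in mgmt.c is the same one the existing hci_core.c callers below already use; a minimal sketch (not part of this patch), assuming the caller holds no other locks:

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	/* ... queue the inquiry / update discovery state ... */
	hci_dev_unlock(hdev);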
Signed-off-by: Andre Guedes Signed-off-by: Gustavo Padovan --- include/net/bluetooth/hci_core.h | 1 - net/bluetooth/mgmt.c | 42 ++++++++++++++++++++-------------------- 2 files changed, 21 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 7c150e4d65b9..ff4e8a5c9ceb 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1170,7 +1170,6 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 ssp, u8 *eir, u16 eir_len); int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len); -int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status); int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status); int mgmt_discovering(struct hci_dev *hdev, u8 discovering); int mgmt_interleaved_discovery(struct hci_dev *hdev); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 434df715448e..a9bd271a736e 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2650,6 +2650,27 @@ int mgmt_interleaved_discovery(struct hci_dev *hdev) return err; } +static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status) +{ + struct pending_cmd *cmd; + u8 type; + int err; + + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + + cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); + if (!cmd) + return -ENOENT; + + type = hdev->discovery.type; + + err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status), + &type, sizeof(type)); + mgmt_pending_remove(cmd); + + return err; +} + static void start_discovery_complete(struct hci_dev *hdev, u8 status) { BT_DBG("status %d", status); @@ -4190,27 +4211,6 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, sizeof(*ev) + eir_len, NULL); } -int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status) -{ - struct pending_cmd *cmd; - u8 type; - int err; - - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - - cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); - if (!cmd) - return -ENOENT; - - type = hdev->discovery.type; - - err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status), - &type, sizeof(type)); - mgmt_pending_remove(cmd); - - return err; -} - int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status) { struct pending_cmd *cmd; -- cgit v1.2.3 From 0d8cc935e01c0fd1312a10881f4c0f1c4b4d05ab Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Tue, 30 Apr 2013 15:29:31 -0300 Subject: Bluetooth: Move discovery macros to hci_core.h Some of discovery macros will be used in hci_core so we need to define them in common place such as hci_core.h. Thus, this patch moves discovery macros to hci_core.h and also adds the DISCOV_ prefix to them. Signed-off-by: Andre Guedes Acked-by: Johan Hedberg Signed-off-by: Gustavo Padovan --- include/net/bluetooth/hci_core.h | 10 ++++++++++ net/bluetooth/mgmt.c | 24 ++++++------------------ 2 files changed, 16 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index ff4e8a5c9ceb..61939a0e6cec 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1115,6 +1115,16 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event); BIT(BDADDR_LE_PUBLIC) | \ BIT(BDADDR_LE_RANDOM)) +/* These LE scan and inquiry parameters were chosen according to LE General + * Discovery Procedure specification. 
+ */ +#define DISCOV_LE_SCAN_WIN 0x12 +#define DISCOV_LE_SCAN_INT 0x12 +#define DISCOV_LE_TIMEOUT msecs_to_jiffies(10240) +#define DISCOV_INTERLEAVED_TIMEOUT msecs_to_jiffies(5120) +#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 +#define DISCOV_BREDR_INQUIRY_LEN 0x08 + int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); int mgmt_index_added(struct hci_dev *hdev); int mgmt_index_removed(struct hci_dev *hdev); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index a9bd271a736e..6b31e93af761 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -102,18 +102,6 @@ static const u16 mgmt_events[] = { MGMT_EV_PASSKEY_NOTIFY, }; -/* - * These LE scan and inquiry parameters were chosen according to LE General - * Discovery Procedure specification. - */ -#define LE_SCAN_WIN 0x12 -#define LE_SCAN_INT 0x12 -#define LE_SCAN_TIMEOUT_LE_ONLY msecs_to_jiffies(10240) -#define LE_SCAN_TIMEOUT_BREDR_LE msecs_to_jiffies(5120) - -#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */ -#define INQUIRY_LEN_BREDR_LE 0x04 /* TGAP(100)/2 */ - #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ @@ -2641,7 +2629,7 @@ int mgmt_interleaved_discovery(struct hci_dev *hdev) hci_dev_lock(hdev); - err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE); + err = hci_do_inquiry(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN); if (err < 0) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); @@ -2689,12 +2677,12 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status) switch (hdev->discovery.type) { case DISCOV_TYPE_LE: queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, - LE_SCAN_TIMEOUT_LE_ONLY); + DISCOV_LE_TIMEOUT); break; case DISCOV_TYPE_INTERLEAVED: queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, - LE_SCAN_TIMEOUT_BREDR_LE); + DISCOV_INTERLEAVED_TIMEOUT); break; case DISCOV_TYPE_BREDR: @@ -2770,7 +2758,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, memset(&inq_cp, 0, sizeof(inq_cp)); memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap)); - inq_cp.length = INQUIRY_LEN_BREDR; + inq_cp.length = DISCOV_BREDR_INQUIRY_LEN; hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp); break; @@ -2807,8 +2795,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, memset(¶m_cp, 0, sizeof(param_cp)); param_cp.type = LE_SCAN_ACTIVE; - param_cp.interval = cpu_to_le16(LE_SCAN_INT); - param_cp.window = cpu_to_le16(LE_SCAN_WIN); + param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT); + param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), ¶m_cp); -- cgit v1.2.3 From 4c87eaab01df271c81f6a68e3c28dbd44d348004 Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Tue, 30 Apr 2013 15:29:32 -0300 Subject: Bluetooth: Use HCI request in interleaved discovery In order to have a better HCI error handling in interleaved discovery functionality, we should use the HCI request framework. This patch updates le_scan_disable_work function so it uses the HCI request framework instead of the hci_send_cmd helper. A complete callback is registered (le_scan_disable_work_complete function) so we are able to trigger the inquiry procedure (if we are running the interleaved discovery) or to stop the discovery procedure (if we are running LE-only discovery). This patch also removes the extra logic in hci_cc_le_set_scan_enable to trigger the inquiry procedure and the mgmt_interleaved_discovery function since they become useless. 
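In short, the commit moves to the build-request-then-run pattern with a completion callback. The following is condensed from the le_scan_disable_work() hunk below, shown only to summarize the flow; it is not a separate API:

	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	hci_req_init(&req, hdev);

	/* Queue the "disable LE scan" command on the request. */
	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	/* le_scan_disable_work_complete() runs once the controller answers and
	 * either stops discovery (LE-only) or kicks off the inquiry phase
	 * (interleaved).
	 */
	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);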
Signed-off-by: Andre Guedes Signed-off-by: Gustavo Padovan --- include/net/bluetooth/hci_core.h | 1 - net/bluetooth/hci_core.c | 65 +++++++++++++++++++++++++++++++++++++++- net/bluetooth/hci_event.c | 10 ------- net/bluetooth/mgmt.c | 17 ----------- 4 files changed, 64 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 61939a0e6cec..b3bdad24c2b9 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1182,7 +1182,6 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len); int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status); int mgmt_discovering(struct hci_dev *hdev, u8 discovering); -int mgmt_interleaved_discovery(struct hci_dev *hdev); int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); bool mgmt_valid_hdev(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 43c63877c5b7..9270d7ee489d 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2067,17 +2067,80 @@ int hci_cancel_le_scan(struct hci_dev *hdev) return 0; } +static void inquiry_complete(struct hci_dev *hdev, u8 status) +{ + if (status) { + BT_ERR("Failed to start inquiry: status %d", status); + + hci_dev_lock(hdev); + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + hci_dev_unlock(hdev); + return; + } +} + +static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status) +{ + /* General inquiry access code (GIAC) */ + u8 lap[3] = { 0x33, 0x8b, 0x9e }; + struct hci_request req; + struct hci_cp_inquiry cp; + int err; + + if (status) { + BT_ERR("Failed to disable LE scanning: status %d", status); + return; + } + + switch (hdev->discovery.type) { + case DISCOV_TYPE_LE: + hci_dev_lock(hdev); + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + hci_dev_unlock(hdev); + break; + + case DISCOV_TYPE_INTERLEAVED: + hci_req_init(&req, hdev); + + memset(&cp, 0, sizeof(cp)); + memcpy(&cp.lap, lap, sizeof(cp.lap)); + cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN; + hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp); + + hci_dev_lock(hdev); + + hci_inquiry_cache_flush(hdev); + + err = hci_req_run(&req, inquiry_complete); + if (err) { + BT_ERR("Inquiry request failed: err %d", err); + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + } + + hci_dev_unlock(hdev); + break; + } +} + static void le_scan_disable_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan_disable.work); struct hci_cp_le_set_scan_enable cp; + struct hci_request req; + int err; BT_DBG("%s", hdev->name); + hci_req_init(&req, hdev); + memset(&cp, 0, sizeof(cp)); + cp.enable = LE_SCAN_DISABLE; + hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); - hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); + err = hci_req_run(&req, le_scan_disable_work_complete); + if (err) + BT_ERR("Disable LE scanning request failed: err %d", err); } static void le_scan_work(struct work_struct *work) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 0e71e6c47391..faaf1f31345d 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -974,16 +974,6 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, } clear_bit(HCI_LE_SCAN, &hdev->dev_flags); - - if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED && - hdev->discovery.state 
== DISCOVERY_FINDING) { - mgmt_interleaved_discovery(hdev); - } else { - hci_dev_lock(hdev); - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - hci_dev_unlock(hdev); - } - break; default: diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 6b31e93af761..743100f3ab9c 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2621,23 +2621,6 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, return err; } -int mgmt_interleaved_discovery(struct hci_dev *hdev) -{ - int err; - - BT_DBG("%s", hdev->name); - - hci_dev_lock(hdev); - - err = hci_do_inquiry(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN); - if (err < 0) - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - - hci_dev_unlock(hdev); - - return err; -} - static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status) { struct pending_cmd *cmd; -- cgit v1.2.3 From 1183fdcad42495073045a2d9755e0a6ac2fa874e Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Tue, 30 Apr 2013 15:29:35 -0300 Subject: Bluetooth: Make mgmt_stop_discovery_failed static mgmt_stop_discovery_failed is now only used in mgmt.c so we can make it a local function. This patch also moves the mgmt_stop_ discovery_failed definition up in mgmt.c to avoid forward declaration. Signed-off-by: Andre Guedes Signed-off-by: Gustavo Padovan --- include/net/bluetooth/hci_core.h | 1 - net/bluetooth/mgmt.c | 32 ++++++++++++++++---------------- 2 files changed, 16 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index b3bdad24c2b9..1b343ef37871 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1180,7 +1180,6 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 ssp, u8 *eir, u16 eir_len); int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len); -int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status); int mgmt_discovering(struct hci_dev *hdev, u8 discovering); int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index c33bc4f4d006..69d17205745b 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2808,6 +2808,22 @@ failed: return err; } +static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status) +{ + struct pending_cmd *cmd; + int err; + + cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); + if (!cmd) + return -ENOENT; + + err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status), + &hdev->discovery.type, sizeof(hdev->discovery.type)); + mgmt_pending_remove(cmd); + + return err; +} + static void stop_discovery_complete(struct hci_dev *hdev, u8 status) { BT_DBG("status %d", status); @@ -4215,22 +4231,6 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, sizeof(*ev) + eir_len, NULL); } -int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status) -{ - struct pending_cmd *cmd; - int err; - - cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); - if (!cmd) - return -ENOENT; - - err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status), - &hdev->discovery.type, sizeof(hdev->discovery.type)); - mgmt_pending_remove(cmd); - - return err; -} - int mgmt_discovering(struct hci_dev *hdev, u8 discovering) { struct mgmt_ev_discovering ev; -- cgit v1.2.3 From 917eedc56c65ba96a3bab4c346d948e73dd872f1 Mon 
Sep 17 00:00:00 2001 From: Andre Guedes Date: Tue, 30 Apr 2013 15:29:37 -0300 Subject: Bluetooth: Remove LE scan helpers This patch removes the LE scan helpers hci_le_scan and hci_cancel_ le_scan and all code related to it. We now use the HCI request framework in device discovery functionality and these helpers are no longer needed. Signed-off-by: Andre Guedes Acked-by: Johan Hedberg Signed-off-by: Gustavo Padovan --- include/net/bluetooth/hci_core.h | 13 ----- net/bluetooth/hci_core.c | 113 --------------------------------------- 2 files changed, 126 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 1b343ef37871..5de7eb95ef63 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -117,13 +117,6 @@ struct oob_data { u8 randomizer[16]; }; -struct le_scan_params { - u8 type; - u16 interval; - u16 window; - int timeout; -}; - #define HCI_MAX_SHORT_NAME_LENGTH 10 struct amp_assoc { @@ -283,9 +276,6 @@ struct hci_dev { struct delayed_work le_scan_disable; - struct work_struct le_scan; - struct le_scan_params le_scan_params; - __s8 adv_tx_power; __u8 adv_data[HCI_MAX_AD_LENGTH]; __u8 adv_data_len; @@ -1222,9 +1212,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], __u8 ltk[16]); int hci_do_inquiry(struct hci_dev *hdev, u8 length); int hci_cancel_inquiry(struct hci_dev *hdev); -int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window, - int timeout); -int hci_cancel_le_scan(struct hci_dev *hdev); u8 bdaddr_to_le(u8 bdaddr_type); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 9270d7ee489d..100539fcbfe5 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1201,8 +1201,6 @@ static int hci_dev_do_close(struct hci_dev *hdev) { BT_DBG("%s %p", hdev->name, hdev); - cancel_work_sync(&hdev->le_scan); - cancel_delayed_work(&hdev->power_off); hci_req_cancel(hdev, ENODEV); @@ -1991,82 +1989,6 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) return mgmt_device_unblocked(hdev, bdaddr, type); } -static void le_scan_param_req(struct hci_request *req, unsigned long opt) -{ - struct le_scan_params *param = (struct le_scan_params *) opt; - struct hci_cp_le_set_scan_param cp; - - memset(&cp, 0, sizeof(cp)); - cp.type = param->type; - cp.interval = cpu_to_le16(param->interval); - cp.window = cpu_to_le16(param->window); - - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp); -} - -static void le_scan_enable_req(struct hci_request *req, unsigned long opt) -{ - struct hci_cp_le_set_scan_enable cp; - - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_ENABLE; - cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); -} - -static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval, - u16 window, int timeout) -{ - long timeo = msecs_to_jiffies(3000); - struct le_scan_params param; - int err; - - BT_DBG("%s", hdev->name); - - if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) - return -EINPROGRESS; - - param.type = type; - param.interval = interval; - param.window = window; - - hci_req_lock(hdev); - - err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) ¶m, - timeo); - if (!err) - err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo); - - hci_req_unlock(hdev); - - if (err < 0) - return err; - - queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, - timeout); - - return 0; -} - -int hci_cancel_le_scan(struct hci_dev *hdev) -{ - 
BT_DBG("%s", hdev->name); - - if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) - return -EALREADY; - - if (cancel_delayed_work(&hdev->le_scan_disable)) { - struct hci_cp_le_set_scan_enable cp; - - /* Send HCI command to disable LE Scan */ - memset(&cp, 0, sizeof(cp)); - hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); - } - - return 0; -} - static void inquiry_complete(struct hci_dev *hdev, u8 status) { if (status) { @@ -2143,40 +2065,6 @@ static void le_scan_disable_work(struct work_struct *work) BT_ERR("Disable LE scanning request failed: err %d", err); } -static void le_scan_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan); - struct le_scan_params *param = &hdev->le_scan_params; - - BT_DBG("%s", hdev->name); - - hci_do_le_scan(hdev, param->type, param->interval, param->window, - param->timeout); -} - -int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window, - int timeout) -{ - struct le_scan_params *param = &hdev->le_scan_params; - - BT_DBG("%s", hdev->name); - - if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) - return -ENOTSUPP; - - if (work_busy(&hdev->le_scan)) - return -EINPROGRESS; - - param->type = type; - param->interval = interval; - param->window = window; - param->timeout = timeout; - - queue_work(system_long_wq, &hdev->le_scan); - - return 0; -} - /* Alloc HCI device */ struct hci_dev *hci_alloc_dev(void) { @@ -2211,7 +2099,6 @@ struct hci_dev *hci_alloc_dev(void) INIT_WORK(&hdev->cmd_work, hci_cmd_work); INIT_WORK(&hdev->tx_work, hci_tx_work); INIT_WORK(&hdev->power_on, hci_power_on); - INIT_WORK(&hdev->le_scan, le_scan_work); INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); -- cgit v1.2.3 From b0434345f2a7330be5277b63606cff26a7965982 Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Tue, 30 Apr 2013 15:29:38 -0300 Subject: Bluetooth: Remove inquiry helpers This patch removes hci_do_inquiry and hci_cancel_inquiry helpers. We now use the HCI request framework in device discovery functionality and these helpers are no longer needed. 
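With the helper gone, callers queue the inquiry command through a request of their own, along the lines of the sketch below (condensed from the discovery code in this series; the hci_dev locking around the cache flush is omitted and the wrapper name is illustrative):

static void queue_bredr_inquiry(struct hci_dev *hdev)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	struct hci_request req;
	int err;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_BREDR_INQUIRY_LEN;
	hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	hci_inquiry_cache_flush(hdev);

	err = hci_req_run(&req, inquiry_complete);
	if (err)
		BT_ERR("Inquiry request failed: err %d", err);
}
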
Signed-off-by: Andre Guedes Acked-by: Johan Hedberg Signed-off-by: Gustavo Padovan --- include/net/bluetooth/hci_core.h | 2 -- net/bluetooth/hci_core.c | 30 ------------------------------ 2 files changed, 32 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 5de7eb95ef63..f77885ea78c2 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1210,8 +1210,6 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u16 to_multiplier); void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], __u8 ltk[16]); -int hci_do_inquiry(struct hci_dev *hdev, u8 length); -int hci_cancel_inquiry(struct hci_dev *hdev); u8 bdaddr_to_le(u8 bdaddr_type); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 100539fcbfe5..e2e9d409d0f6 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3501,36 +3501,6 @@ static void hci_cmd_work(struct work_struct *work) } } -int hci_do_inquiry(struct hci_dev *hdev, u8 length) -{ - /* General inquiry access code (GIAC) */ - u8 lap[3] = { 0x33, 0x8b, 0x9e }; - struct hci_cp_inquiry cp; - - BT_DBG("%s", hdev->name); - - if (test_bit(HCI_INQUIRY, &hdev->flags)) - return -EINPROGRESS; - - hci_inquiry_cache_flush(hdev); - - memset(&cp, 0, sizeof(cp)); - memcpy(&cp.lap, lap, sizeof(cp.lap)); - cp.length = length; - - return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); -} - -int hci_cancel_inquiry(struct hci_dev *hdev) -{ - BT_DBG("%s", hdev->name); - - if (!test_bit(HCI_INQUIRY, &hdev->flags)) - return -EALREADY; - - return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); -} - u8 bdaddr_to_le(u8 bdaddr_type) { switch (bdaddr_type) { -- cgit v1.2.3 From 0a804654af62dfea4899c66561d74d72273b2921 Mon Sep 17 00:00:00 2001 From: Andrei Emeltchenko Date: Tue, 14 May 2013 11:44:17 +0300 Subject: Bluetooth: Remove unneeded flag Remove HCI_LINK_KEYS flag since using HCI_MGMT is enough for test that user space expects the kernel managing link keys. Signed-off-by: Andrei Emeltchenko Acked-by: Johan Hedberg Signed-off-by: Gustavo Padovan --- include/net/bluetooth/hci.h | 1 - net/bluetooth/mgmt.c | 2 -- 2 files changed, 3 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index e0512aaef4b8..3c592cf473da 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -107,7 +107,6 @@ enum { HCI_MGMT, HCI_PAIRABLE, HCI_SERVICE_CACHE, - HCI_LINK_KEYS, HCI_DEBUG_KEYS, HCI_UNREGISTER, diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 7ae737fcf5e7..fedc5399d465 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1736,8 +1736,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, hci_link_keys_clear(hdev); - set_bit(HCI_LINK_KEYS, &hdev->dev_flags); - if (cp->debug_keys) set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); else -- cgit v1.2.3 From 5243b6ac9ed1310f2329b3d0a830a55589e518ea Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Fri, 21 Jun 2013 16:17:11 -0700 Subject: ip_tunnel: Protect tunnel functions with CONFIG_INET guard. Tunnel constants can be used in generic code but in these cases the inline functions in ip_tunnels.h cause compilation problems if CONFIG_INET is not set. CC: Pravin Shelar Reported-by: Randy Dunlap Signed-off-by: Jesse Gross Acked-by: Randy Dunlap Signed-off-by: David S. 
Miller --- include/net/ip_tunnels.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 10bbb4273f7d..b0d982471a5c 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -93,6 +93,8 @@ struct ip_tunnel_net { struct net_device *fb_tunnel_dev; }; +#ifdef CONFIG_INET + int ip_tunnel_init(struct net_device *dev); void ip_tunnel_uninit(struct net_device *dev); void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); @@ -180,4 +182,7 @@ static inline void iptunnel_xmit_stats(int err, err_stats->tx_dropped++; } } + +#endif /* CONFIG_INET */ + #endif /* __NET_IP_TUNNELS_H */ -- cgit v1.2.3 From 77e2af0312b12dccd5043a7cc9cd49ab6a212996 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 21 Jun 2013 19:38:06 +0200 Subject: net: if_arp: add ARPHRD_NETLINK type This small patch adds the definition of ARPHRD_NETLINK which can for example be used by netlink monitoring devices as device type. So that sockaddr_ll can pick it up and based on that choose the correct packet dissector. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/uapi/linux/if_arp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h index 82c7d1bdadeb..d7fea3496f32 100644 --- a/include/uapi/linux/if_arp.h +++ b/include/uapi/linux/if_arp.h @@ -93,6 +93,7 @@ #define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */ #define ARPHRD_CAIF 822 /* CAIF media type */ #define ARPHRD_IP6GRE 823 /* GRE over IPv6 */ +#define ARPHRD_NETLINK 824 /* Netlink header */ #define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ #define ARPHRD_NONE 0xFFFE /* zero header length */ -- cgit v1.2.3 From bcbde0d449eda7afa8f63280b165c8300dbd00e2 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 21 Jun 2013 19:38:07 +0200 Subject: net: netlink: virtual tap device management Similarly to the networking receive path with ptype_all taps, we add the possibility to register netdevices that are for ARPHRD_NETLINK to the netlink subsystem, so that those can be used for netlink analyzers resp. debuggers. We do not offer a direct callback function as out-of-tree modules could do crap with it. Instead, a netdevice must be registered properly and only receives a clone, managed by the netlink layer. Symbols are exported as GPL-only. Signed-off-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/linux/netlink.h | 10 +++++ net/netlink/af_netlink.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 117 insertions(+) (limited to 'include') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index f78b430f4af5..86fde81ac2e6 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -145,4 +145,14 @@ static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, return __netlink_dump_start(ssk, skb, nlh, control); } +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +extern int netlink_add_tap(struct netlink_tap *nt); +extern int __netlink_remove_tap(struct netlink_tap *nt); +extern int netlink_remove_tap(struct netlink_tap *nt); + #endif /* __LINUX_NETLINK_H */ diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 275d901d7e46..6967fbcca6c5 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include @@ -101,6 +102,9 @@ static atomic_t nl_table_users = ATOMIC_INIT(0); static ATOMIC_NOTIFIER_HEAD(netlink_chain); +static DEFINE_SPINLOCK(netlink_tap_lock); +static struct list_head netlink_tap_all __read_mostly; + static inline u32 netlink_group_mask(u32 group) { return group ? 1 << (group - 1) : 0; @@ -111,6 +115,100 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask]; } +int netlink_add_tap(struct netlink_tap *nt) +{ + if (unlikely(nt->dev->type != ARPHRD_NETLINK)) + return -EINVAL; + + spin_lock(&netlink_tap_lock); + list_add_rcu(&nt->list, &netlink_tap_all); + spin_unlock(&netlink_tap_lock); + + if (nt->module) + __module_get(nt->module); + + return 0; +} +EXPORT_SYMBOL_GPL(netlink_add_tap); + +int __netlink_remove_tap(struct netlink_tap *nt) +{ + bool found = false; + struct netlink_tap *tmp; + + spin_lock(&netlink_tap_lock); + + list_for_each_entry(tmp, &netlink_tap_all, list) { + if (nt == tmp) { + list_del_rcu(&nt->list); + found = true; + goto out; + } + } + + pr_warn("__netlink_remove_tap: %p not found\n", nt); +out: + spin_unlock(&netlink_tap_lock); + + if (found && nt->module) + module_put(nt->module); + + return found ? 
0 : -ENODEV; +} +EXPORT_SYMBOL_GPL(__netlink_remove_tap); + +int netlink_remove_tap(struct netlink_tap *nt) +{ + int ret; + + ret = __netlink_remove_tap(nt); + synchronize_net(); + + return ret; +} +EXPORT_SYMBOL_GPL(netlink_remove_tap); + +static int __netlink_deliver_tap_skb(struct sk_buff *skb, + struct net_device *dev) +{ + struct sk_buff *nskb; + int ret = -ENOMEM; + + dev_hold(dev); + nskb = skb_clone(skb, GFP_ATOMIC); + if (nskb) { + nskb->dev = dev; + ret = dev_queue_xmit(nskb); + if (unlikely(ret > 0)) + ret = net_xmit_errno(ret); + } + + dev_put(dev); + return ret; +} + +static void __netlink_deliver_tap(struct sk_buff *skb) +{ + int ret; + struct netlink_tap *tmp; + + list_for_each_entry_rcu(tmp, &netlink_tap_all, list) { + ret = __netlink_deliver_tap_skb(skb, tmp->dev); + if (unlikely(ret)) + break; + } +} + +static void netlink_deliver_tap(struct sk_buff *skb) +{ + rcu_read_lock(); + + if (unlikely(!list_empty(&netlink_tap_all))) + __netlink_deliver_tap(skb); + + rcu_read_unlock(); +} + static void netlink_overrun(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); @@ -1518,6 +1616,8 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = skb->len; + netlink_deliver_tap(skb); + #ifdef CONFIG_NETLINK_MMAP if (netlink_skb_is_mmaped(skb)) netlink_queue_mmaped_skb(sk, skb); @@ -1578,6 +1678,11 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, ret = -ECONNREFUSED; if (nlk->netlink_rcv != NULL) { + /* We could do a netlink_deliver_tap(skb) here as well + * but since this is intended for the kernel only, we + * should rather let it stay under the hood. + */ + ret = skb->len; netlink_skb_set_owner_r(skb, sk); NETLINK_CB(skb).sk = ssk; @@ -2975,6 +3080,8 @@ static int __init netlink_proto_init(void) nl_table[i].compare = netlink_compare; } + INIT_LIST_HEAD(&netlink_tap_all); + netlink_add_usersock_entry(); sock_register(&netlink_family_ops); -- cgit v1.2.3 From 7ae8639c9d6d5aba7990b4fad3506ff7b4667a45 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 25 Jun 2013 01:21:06 -0700 Subject: tcp: remove invalid __rcu annotation struct tcp_fastopen_context has a field named tfm, which is a pointer to a crypto_cipher structure. It currently has a __rcu annotation, which is not needed at all. tcp_fastopen_ctx is the pointer fetched by rcu_dereference(), but once we have a pointer to current tcp_fastopen_context, we do not use/need rcu_dereference() to access tfm. This fixes a lot of sparse errors like the following : net/ipv4/tcp_fastopen.c:21:31: warning: incorrect type in argument 1 (different address spaces) net/ipv4/tcp_fastopen.c:21:31: expected struct crypto_cipher *tfm net/ipv4/tcp_fastopen.c:21:31: got struct crypto_cipher [noderef] *tfm Signed-off-by: Eric Dumazet Cc: Jerry Chu Signed-off-by: David S. 
Miller --- include/net/tcp.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 6fa80831dc40..d1980054ec75 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1319,9 +1319,9 @@ void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc); /* Fastopen key context */ struct tcp_fastopen_context { - struct crypto_cipher __rcu *tfm; - __u8 key[TCP_FASTOPEN_KEY_LENGTH]; - struct rcu_head rcu; + struct crypto_cipher *tfm; + __u8 key[TCP_FASTOPEN_KEY_LENGTH]; + struct rcu_head rcu; }; /* write queue abstraction */ -- cgit v1.2.3 From b7b1bfce0bb68bd8f6e62a28295922785cc63781 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Sun, 23 Jun 2013 18:39:01 +0200 Subject: ipv6: split duplicate address detection and router solicitation timer This patch splits the timers for duplicate address detection and router solicitations apart. The router solicitations timer goes into inet6_dev and the dad timer stays in inet6_ifaddr. The reason behind this patch is to reduce the number of unneeded router solicitations send out by the host if additional link-local addresses are created. Currently we send out RS for every link-local address on an interface. If the RS timer fires we pick a source address with ipv6_get_lladdr. This change could hurt people adding additional link-local addresses and specifying these addresses in the radvd clients section because we no longer guarantee that we use every ll address as source address in router solicitations. Cc: Flavio Leitner Cc: Hideaki YOSHIFUJI Cc: David Stevens Signed-off-by: Hannes Frederic Sowa Reviewed-by: Flavio Leitner Signed-off-by: David S. Miller --- include/net/if_inet6.h | 8 ++- net/ipv6/addrconf.c | 164 +++++++++++++++++++++++++++---------------------- 2 files changed, 97 insertions(+), 75 deletions(-) (limited to 'include') diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index e07feb456d19..e4c5a2d2ba34 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -50,7 +50,7 @@ struct inet6_ifaddr { int state; - __u8 probes; + __u8 dad_probes; __u8 flags; __u16 scope; @@ -58,7 +58,7 @@ struct inet6_ifaddr { unsigned long cstamp; /* created timestamp */ unsigned long tstamp; /* updated timestamp */ - struct timer_list timer; + struct timer_list dad_timer; struct inet6_dev *idev; struct rt6_info *rt; @@ -195,6 +195,10 @@ struct inet6_dev { struct neigh_parms *nd_parms; struct ipv6_devconf cnf; struct ipv6_devstat stats; + + struct timer_list rs_timer; + __u8 rs_probes; + unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ struct rcu_head rcu; }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 90788a1c6bbc..c06bc76280b2 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -253,37 +253,32 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev) return !qdisc_tx_is_noop(dev); } -static void addrconf_del_timer(struct inet6_ifaddr *ifp) +static void addrconf_del_rs_timer(struct inet6_dev *idev) { - if (del_timer(&ifp->timer)) + if (del_timer(&idev->rs_timer)) + __in6_dev_put(idev); +} + +static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp) +{ + if (del_timer(&ifp->dad_timer)) __in6_ifa_put(ifp); } -enum addrconf_timer_t { - AC_NONE, - AC_DAD, - AC_RS, -}; +static void addrconf_mod_rs_timer(struct inet6_dev *idev, + unsigned long when) +{ + if (!timer_pending(&idev->rs_timer)) + in6_dev_hold(idev); + mod_timer(&idev->rs_timer, jiffies + when); +} -static void 
addrconf_mod_timer(struct inet6_ifaddr *ifp, - enum addrconf_timer_t what, - unsigned long when) +static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp, + unsigned long when) { - if (!del_timer(&ifp->timer)) + if (!timer_pending(&ifp->dad_timer)) in6_ifa_hold(ifp); - - switch (what) { - case AC_DAD: - ifp->timer.function = addrconf_dad_timer; - break; - case AC_RS: - ifp->timer.function = addrconf_rs_timer; - break; - default: - break; - } - ifp->timer.expires = jiffies + when; - add_timer(&ifp->timer); + mod_timer(&ifp->dad_timer, jiffies + when); } static int snmp6_alloc_dev(struct inet6_dev *idev) @@ -326,6 +321,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) WARN_ON(!list_empty(&idev->addr_list)); WARN_ON(idev->mc_list != NULL); + WARN_ON(timer_pending(&idev->rs_timer)); #ifdef NET_REFCNT_DEBUG pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL"); @@ -357,7 +353,8 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) rwlock_init(&ndev->lock); ndev->dev = dev; INIT_LIST_HEAD(&ndev->addr_list); - + setup_timer(&ndev->rs_timer, addrconf_rs_timer, + (unsigned long)ndev); memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); ndev->cnf.mtu6 = dev->mtu; ndev->cnf.sysctl = NULL; @@ -776,7 +773,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) in6_dev_put(ifp->idev); - if (del_timer(&ifp->timer)) + if (del_timer(&ifp->dad_timer)) pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); if (ifp->state != INET6_IFADDR_STATE_DEAD) { @@ -869,9 +866,9 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, spin_lock_init(&ifa->lock); spin_lock_init(&ifa->state_lock); - init_timer(&ifa->timer); + setup_timer(&ifa->dad_timer, addrconf_dad_timer, + (unsigned long)ifa); INIT_HLIST_NODE(&ifa->addr_lst); - ifa->timer.data = (unsigned long) ifa; ifa->scope = scope; ifa->prefix_len = pfxlen; ifa->flags = flags | IFA_F_TENTATIVE; @@ -994,7 +991,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) } write_unlock_bh(&idev->lock); - addrconf_del_timer(ifp); + addrconf_del_dad_timer(ifp); ipv6_ifa_notify(RTM_DELADDR, ifp); @@ -1447,6 +1444,23 @@ try_nextdev: } EXPORT_SYMBOL(ipv6_dev_get_saddr); +static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, + unsigned char banned_flags) +{ + struct inet6_ifaddr *ifp; + int err = -EADDRNOTAVAIL; + + list_for_each_entry(ifp, &idev->addr_list, if_list) { + if (ifp->scope == IFA_LINK && + !(ifp->flags & banned_flags)) { + *addr = ifp->addr; + err = 0; + break; + } + } + return err; +} + int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, unsigned char banned_flags) { @@ -1456,17 +1470,8 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) { - struct inet6_ifaddr *ifp; - read_lock_bh(&idev->lock); - list_for_each_entry(ifp, &idev->addr_list, if_list) { - if (ifp->scope == IFA_LINK && - !(ifp->flags & banned_flags)) { - *addr = ifp->addr; - err = 0; - break; - } - } + err = __ipv6_get_lladdr(idev, addr, banned_flags); read_unlock_bh(&idev->lock); } rcu_read_unlock(); @@ -1580,7 +1585,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) { if (ifp->flags&IFA_F_PERMANENT) { spin_lock_bh(&ifp->lock); - addrconf_del_timer(ifp); + addrconf_del_dad_timer(ifp); ifp->flags |= IFA_F_TENTATIVE; if (dad_failed) ifp->flags |= IFA_F_DADFAILED; @@ -3036,7 +3041,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) hlist_for_each_entry_rcu(ifa, h, addr_lst) 
{ if (ifa->idev == idev) { hlist_del_init_rcu(&ifa->addr_lst); - addrconf_del_timer(ifa); + addrconf_del_dad_timer(ifa); goto restart; } } @@ -3045,6 +3050,8 @@ static int addrconf_ifdown(struct net_device *dev, int how) write_lock_bh(&idev->lock); + addrconf_del_rs_timer(idev); + /* Step 2: clear flags for stateless addrconf */ if (!how) idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); @@ -3074,7 +3081,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) while (!list_empty(&idev->addr_list)) { ifa = list_first_entry(&idev->addr_list, struct inet6_ifaddr, if_list); - addrconf_del_timer(ifa); + addrconf_del_dad_timer(ifa); list_del(&ifa->if_list); @@ -3116,10 +3123,10 @@ static int addrconf_ifdown(struct net_device *dev, int how) static void addrconf_rs_timer(unsigned long data) { - struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; - struct inet6_dev *idev = ifp->idev; + struct inet6_dev *idev = (struct inet6_dev *)data; + struct in6_addr lladdr; - read_lock(&idev->lock); + write_lock(&idev->lock); if (idev->dead || !(idev->if_flags & IF_READY)) goto out; @@ -3130,18 +3137,19 @@ static void addrconf_rs_timer(unsigned long data) if (idev->if_flags & IF_RA_RCVD) goto out; - spin_lock(&ifp->lock); - if (ifp->probes++ < idev->cnf.rtr_solicits) { - /* The wait after the last probe can be shorter */ - addrconf_mod_timer(ifp, AC_RS, - (ifp->probes == idev->cnf.rtr_solicits) ? - idev->cnf.rtr_solicit_delay : - idev->cnf.rtr_solicit_interval); - spin_unlock(&ifp->lock); + if (idev->rs_probes++ < idev->cnf.rtr_solicits) { + if (!__ipv6_get_lladdr(idev, &lladdr, IFA_F_TENTATIVE)) + ndisc_send_rs(idev->dev, &lladdr, + &in6addr_linklocal_allrouters); + else + goto out; - ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters); + /* The wait after the last probe can be shorter */ + addrconf_mod_rs_timer(idev, (idev->rs_probes == + idev->cnf.rtr_solicits) ? + idev->cnf.rtr_solicit_delay : + idev->cnf.rtr_solicit_interval); } else { - spin_unlock(&ifp->lock); /* * Note: we do not support deprecated "all on-link" * assumption any longer. @@ -3150,8 +3158,8 @@ static void addrconf_rs_timer(unsigned long data) } out: - read_unlock(&idev->lock); - in6_ifa_put(ifp); + write_unlock(&idev->lock); + in6_dev_put(idev); } /* @@ -3167,8 +3175,8 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp) else rand_num = net_random() % (idev->cnf.rtr_solicit_delay ? 
: 1); - ifp->probes = idev->cnf.dad_transmits; - addrconf_mod_timer(ifp, AC_DAD, rand_num); + ifp->dad_probes = idev->cnf.dad_transmits; + addrconf_mod_dad_timer(ifp, rand_num); } static void addrconf_dad_start(struct inet6_ifaddr *ifp) @@ -3229,40 +3237,40 @@ static void addrconf_dad_timer(unsigned long data) struct inet6_dev *idev = ifp->idev; struct in6_addr mcaddr; - if (!ifp->probes && addrconf_dad_end(ifp)) + if (!ifp->dad_probes && addrconf_dad_end(ifp)) goto out; - read_lock(&idev->lock); + write_lock(&idev->lock); if (idev->dead || !(idev->if_flags & IF_READY)) { - read_unlock(&idev->lock); + write_unlock(&idev->lock); goto out; } spin_lock(&ifp->lock); if (ifp->state == INET6_IFADDR_STATE_DEAD) { spin_unlock(&ifp->lock); - read_unlock(&idev->lock); + write_unlock(&idev->lock); goto out; } - if (ifp->probes == 0) { + if (ifp->dad_probes == 0) { /* * DAD was successful */ ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); spin_unlock(&ifp->lock); - read_unlock(&idev->lock); + write_unlock(&idev->lock); addrconf_dad_completed(ifp); goto out; } - ifp->probes--; - addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time); + ifp->dad_probes--; + addrconf_mod_dad_timer(ifp, ifp->idev->nd_parms->retrans_time); spin_unlock(&ifp->lock); - read_unlock(&idev->lock); + write_unlock(&idev->lock); /* send a neighbour solicitation for our addr */ addrconf_addr_solict_mult(&ifp->addr, &mcaddr); @@ -3274,6 +3282,9 @@ out: static void addrconf_dad_completed(struct inet6_ifaddr *ifp) { struct net_device *dev = ifp->idev->dev; + struct in6_addr lladdr; + + addrconf_del_dad_timer(ifp); /* * Configure the address for reception. Now it is valid. @@ -3294,13 +3305,20 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) * [...] as part of DAD [...] there is no need * to delay again before sending the first RS */ - ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters); + if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) + ndisc_send_rs(dev, &lladdr, + &in6addr_linklocal_allrouters); + else + return; - spin_lock_bh(&ifp->lock); - ifp->probes = 1; + write_lock_bh(&ifp->idev->lock); + spin_lock(&ifp->lock); + ifp->idev->rs_probes = 1; ifp->idev->if_flags |= IF_RS_SENT; - addrconf_mod_timer(ifp, AC_RS, ifp->idev->cnf.rtr_solicit_interval); - spin_unlock_bh(&ifp->lock); + addrconf_mod_rs_timer(ifp->idev, + ifp->idev->cnf.rtr_solicit_interval); + spin_unlock(&ifp->lock); + write_unlock_bh(&ifp->idev->lock); } } -- cgit v1.2.3 From f072d7aba79c73c3bb4ff1a9b58be5525d755d27 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 25 Jun 2013 18:17:25 +0200 Subject: net: sctp: remove TEST_FRAME ifdef We do neither ship a test_frame.h, nor will this be compatible with the 2.5 out-of-tree lksctp kernel test suite anyway. So remove this artefact. Signed-off-by: Daniel Borkmann Acked-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 6321c081118f..15214a825f92 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -188,11 +188,6 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly; * Section: Macros, externs, and inlines */ - -#ifdef TEST_FRAME -#include -#else - /* spin lock wrappers. 
*/ #define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags) #define sctp_spin_unlock_irqrestore(lock, flags) \ @@ -218,8 +213,6 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly; #define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field) #define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field) -#endif /* !TEST_FRAME */ - /* sctp mib definitions */ enum { SCTP_MIB_NUM = 0, -- cgit v1.2.3 From d36f82b2435690d8742235d7bdc5bb5e878077e3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 25 Jun 2013 18:17:26 +0200 Subject: ktime: add ms_to_ktime() and ktime_add_ms() helpers Add two ktime helper functions that i) convert a given msec value to a ktime structure and ii) that adds a msec value to a ktime structure. Signed-off-by: Daniel Borkmann Acked-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/linux/ktime.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include') diff --git a/include/linux/ktime.h b/include/linux/ktime.h index bbca12804d12..b4fa5e4cd158 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -323,6 +323,11 @@ static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) return ktime_add_ns(kt, usec * 1000); } +static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec) +{ + return ktime_add_ns(kt, msec * NSEC_PER_MSEC); +} + static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) { return ktime_sub_ns(kt, usec * 1000); @@ -366,7 +371,15 @@ extern void ktime_get_ts(struct timespec *ts); static inline ktime_t ns_to_ktime(u64 ns) { static const ktime_t ktime_zero = { .tv64 = 0 }; + return ktime_add_ns(ktime_zero, ns); } +static inline ktime_t ms_to_ktime(u64 ms) +{ + static const ktime_t ktime_zero = { .tv64 = 0 }; + + return ktime_add_ms(ktime_zero, ms); +} + #endif -- cgit v1.2.3 From 52db882f3fc2903014e638ee91e690085fe37fdb Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 25 Jun 2013 18:17:27 +0200 Subject: net: sctp: migrate cookie life from timeval to ktime Currently, SCTP code defines its own timeval functions (since timeval is rarely used inside the kernel by others), namely tv_lt() and TIMEVAL_ADD() macros, that operate on SCTP cookie expiration. We might as well remove all those, and operate directly on ktime structures for a couple of reasons: ktime is available on all archs; complexity of ktime calculations depending on the arch is less than (reduces to a simple arithmetic operations on archs with BITS_PER_LONG == 64 or CONFIG_KTIME_SCALAR) or equal to timeval functions (other archs); code becomes more readable; macros can be thrown out. Signed-off-by: Daniel Borkmann Acked-by: Vlad Yasevich Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 18 ------------------ include/net/sctp/structs.h | 6 +++--- net/sctp/associola.c | 8 +------- net/sctp/sm_make_chunk.c | 19 ++++++++----------- net/sctp/socket.c | 14 +++----------- 5 files changed, 15 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 15214a825f92..e6b95bc4d8e6 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -560,24 +560,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\ /* Round an int up to the next multiple of 4. */ #define WORD_ROUND(s) (((s)+3)&~3) -/* Compare two timevals. */ -#define tv_lt(s, t) \ - (s.tv_sec < t.tv_sec || (s.tv_sec == t.tv_sec && s.tv_usec < t.tv_usec)) - -/* Add tv1 to tv2. 
*/ -#define TIMEVAL_ADD(tv1, tv2) \ -({ \ - suseconds_t usecs = (tv2).tv_usec + (tv1).tv_usec; \ - time_t secs = (tv2).tv_sec + (tv1).tv_sec; \ -\ - if (usecs >= 1000000) { \ - usecs -= 1000000; \ - secs++; \ - } \ - (tv2).tv_sec = secs; \ - (tv2).tv_usec = usecs; \ -}) - /* External references. */ extern struct proto sctp_prot; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 1bd4c4144fe8..e745c92a1532 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -54,7 +54,7 @@ #ifndef __sctp_structs_h__ #define __sctp_structs_h__ -#include /* We get struct timespec. */ +#include #include /* linux/in.h needs this!! */ #include /* We get struct sockaddr_in. */ #include /* We get struct in6_addr */ @@ -284,7 +284,7 @@ struct sctp_cookie { __u32 peer_ttag; /* When does this cookie expire? */ - struct timeval expiration; + ktime_t expiration; /* Number of inbound/outbound streams which are set * and negotiated during the INIT process. @@ -1537,7 +1537,7 @@ struct sctp_association { sctp_state_t state; /* The cookie life I award for any cookie. */ - struct timeval cookie_life; + ktime_t cookie_life; /* Overall : The overall association error count. * Error Count : [Clear this any time I get something.] diff --git a/net/sctp/associola.c b/net/sctp/associola.c index bf6e6bd553c0..9a383a8774e8 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -102,13 +102,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port); asoc->state = SCTP_STATE_CLOSED; - - /* Set these values from the socket values, a conversion between - * millsecons to seconds/microseconds must also be done. - */ - asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000; - asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000) - * 1000; + asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life); asoc->frag_point = 0; asoc->user_frag = sp->user_frag; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index fc8548743ed5..dd71f1f9ba10 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1630,8 +1630,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, cookie->c.adaptation_ind = asoc->peer.adaptation_ind; /* Set an expiration time for the cookie. */ - do_gettimeofday(&cookie->c.expiration); - TIMEVAL_ADD(asoc->cookie_life, cookie->c.expiration); + cookie->c.expiration = ktime_add(asoc->cookie_life, + ktime_get()); /* Copy the peer's init packet. */ memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, @@ -1680,7 +1680,7 @@ struct sctp_association *sctp_unpack_cookie( unsigned int len; sctp_scope_t scope; struct sk_buff *skb = chunk->skb; - struct timeval tv; + ktime_t kt; struct hash_desc desc; /* Header size is static data prior to the actual cookie, including @@ -1757,11 +1757,11 @@ no_hmac: * down the new association establishment instead of every packet. 
*/ if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) - skb_get_timestamp(skb, &tv); + kt = skb_get_ktime(skb); else - do_gettimeofday(&tv); + kt = ktime_get(); - if (!asoc && tv_lt(bear_cookie->expiration, tv)) { + if (!asoc && ktime_compare(bear_cookie->expiration, kt) < 0) { /* * Section 3.3.10.3 Stale Cookie Error (3) * @@ -1773,9 +1773,7 @@ no_hmac: len = ntohs(chunk->chunk_hdr->length); *errp = sctp_make_op_error_space(asoc, chunk, len); if (*errp) { - suseconds_t usecs = (tv.tv_sec - - bear_cookie->expiration.tv_sec) * 1000000L + - tv.tv_usec - bear_cookie->expiration.tv_usec; + suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration)); __be32 n = htonl(usecs); sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, @@ -2514,8 +2512,7 @@ do_addr_param: /* Suggested Cookie Life span increment's unit is msec, * (1/1000sec). */ - asoc->cookie_life.tv_sec += stale / 1000; - asoc->cookie_life.tv_usec += (stale % 1000) * 1000; + asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale); break; case SCTP_PARAM_HOST_NAME_ADDRESS: diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 32db19ba4a21..4c47e5578d71 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2910,13 +2910,8 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsig asoc->max_retrans = assocparams.sasoc_asocmaxrxt; } - if (assocparams.sasoc_cookie_life != 0) { - asoc->cookie_life.tv_sec = - assocparams.sasoc_cookie_life / 1000; - asoc->cookie_life.tv_usec = - (assocparams.sasoc_cookie_life % 1000) - * 1000; - } + if (assocparams.sasoc_cookie_life != 0) + asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); } else { /* Set the values to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); @@ -5074,10 +5069,7 @@ static int sctp_getsockopt_associnfo(struct sock *sk, int len, assocparams.sasoc_asocmaxrxt = asoc->max_retrans; assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; assocparams.sasoc_local_rwnd = asoc->a_rwnd; - assocparams.sasoc_cookie_life = (asoc->cookie_life.tv_sec - * 1000) + - (asoc->cookie_life.tv_usec - / 1000); + assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); list_for_each(pos, &asoc->peer.transport_addr_list) { cnt ++; -- cgit v1.2.3 From 2d48d67fa8cd129ea85ea02d91b4a793286866f8 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Mon, 24 Jun 2013 10:28:03 +0300 Subject: net: poll/select low latency socket support select/poll busy-poll support. Split sysctl value into two separate ones, one for read and one for poll. updated Documentation/sysctl/net.txt Add a new poll flag POLL_LL. When this flag is set, sock_poll will call sk_poll_ll if possible. sock_poll sets this flag in its return value to indicate to select/poll when a socket that can busy poll is found. When poll/select have nothing to report, call the low-level sock_poll again until we are out of time or we find something. Once the system call finds something, it stops setting POLL_LL, so it can return the result to the user ASAP. Signed-off-by: Eliezer Tamir Signed-off-by: David S. 
Miller --- Documentation/sysctl/net.txt | 18 ++++++++++++++++-- fs/select.c | 34 +++++++++++++++++++++++++++++----- include/net/ll_poll.h | 35 ++++++++++++++++++++++------------- include/uapi/asm-generic/poll.h | 2 ++ net/core/sock.c | 2 +- net/core/sysctl_net_core.c | 8 ++++++++ net/socket.c | 14 +++++++++++++- 7 files changed, 91 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 5369879eafe2..e658bbfb641f 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -50,13 +50,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt, it's a Per-CPU variable. Default: 64 -low_latency_poll +low_latency_read ---------------- -Low latency busy poll timeout. (needs CONFIG_NET_LL_RX_POLL) +Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL) Approximate time in us to spin waiting for packets on the device queue. +This sets the default value of the SO_LL socket option. +Can be set or overridden per socket by setting socket option SO_LL. Recommended value is 50. May increase power usage. Default: 0 (off) +low_latency_poll +---------------- +Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL) +Approximate time in us to spin waiting for packets on the device queue. +Recommended value depends on the number of sockets you poll on. +For several sockets 50, for several hundreds 100. +For more than that you probably want to use epoll. +Note that only sockets with SO_LL set will be busy polled, so you want to either +selectively set SO_LL on those sockets or set sysctl.net.low_latency_read globally. +May increase power usage. +Default: 0 (off) + rmem_default ------------ diff --git a/fs/select.c b/fs/select.c index 8c1c96c27062..79b876eb91da 100644 --- a/fs/select.c +++ b/fs/select.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -384,9 +385,10 @@ get_max: #define POLLEX_SET (POLLPRI) static inline void wait_key_set(poll_table *wait, unsigned long in, - unsigned long out, unsigned long bit) + unsigned long out, unsigned long bit, + unsigned int ll_flag) { - wait->_key = POLLEX_SET; + wait->_key = POLLEX_SET | ll_flag; if (in & bit) wait->_key |= POLLIN_SET; if (out & bit) @@ -400,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) poll_table *wait; int retval, i, timed_out = 0; unsigned long slack = 0; + unsigned int ll_flag = POLL_LL; + u64 ll_time = ll_end_time(); rcu_read_lock(); retval = max_select_fd(n, fds); @@ -422,6 +426,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) retval = 0; for (;;) { unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp; + bool can_ll = false; inp = fds->in; outp = fds->out; exp = fds->ex; rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex; @@ -449,7 +454,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) f_op = f.file->f_op; mask = DEFAULT_POLLMASK; if (f_op && f_op->poll) { - wait_key_set(wait, in, out, bit); + wait_key_set(wait, in, out, + bit, ll_flag); mask = (*f_op->poll)(f.file, wait); } fdput(f); @@ -468,6 +474,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) retval++; wait->_qproc = NULL; } + if (mask & POLL_LL) + can_ll = true; + /* got something, stop busy polling */ + if (retval) + ll_flag = 0; } } if (res_in) @@ -486,6 +497,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) break; } + if (can_ll && can_poll_ll(ll_time)) + continue; + /* * If this 
is the first loop and we have a timeout * given, then we convert to ktime_t and set the to @@ -717,7 +731,8 @@ struct poll_list { * pwait poll_table will be used by the fd-provided poll handler for waiting, * if pwait->_qproc is non-NULL. */ -static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) +static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait, + bool *can_ll, unsigned int ll_flag) { unsigned int mask; int fd; @@ -731,7 +746,10 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) mask = DEFAULT_POLLMASK; if (f.file->f_op && f.file->f_op->poll) { pwait->_key = pollfd->events|POLLERR|POLLHUP; + pwait->_key |= ll_flag; mask = f.file->f_op->poll(f.file, pwait); + if (mask & POLL_LL) + *can_ll = true; } /* Mask out unneeded events. */ mask &= pollfd->events | POLLERR | POLLHUP; @@ -750,6 +768,8 @@ static int do_poll(unsigned int nfds, struct poll_list *list, ktime_t expire, *to = NULL; int timed_out = 0, count = 0; unsigned long slack = 0; + unsigned int ll_flag = POLL_LL; + u64 ll_time = ll_end_time(); /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { @@ -762,6 +782,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, for (;;) { struct poll_list *walk; + bool can_ll = false; for (walk = list; walk != NULL; walk = walk->next) { struct pollfd * pfd, * pfd_end; @@ -776,9 +797,10 @@ static int do_poll(unsigned int nfds, struct poll_list *list, * this. They'll get immediately deregistered * when we break out and return. */ - if (do_pollfd(pfd, pt)) { + if (do_pollfd(pfd, pt, &can_ll, ll_flag)) { count++; pt->_qproc = NULL; + ll_flag = 0; } } } @@ -795,6 +817,8 @@ static int do_poll(unsigned int nfds, struct poll_list *list, if (count || timed_out) break; + if (can_ll && can_poll_ll(ll_time)) + continue; /* * If this is the first loop and we have a timeout * given, then we convert to ktime_t and set the to diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index fcc7c365cee5..5bf2b3a6129e 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -30,6 +30,7 @@ #ifdef CONFIG_NET_LL_RX_POLL struct napi_struct; +extern unsigned int sysctl_net_ll_read __read_mostly; extern unsigned int sysctl_net_ll_poll __read_mostly; /* return values from ndo_ll_poll */ @@ -38,17 +39,18 @@ extern unsigned int sysctl_net_ll_poll __read_mostly; /* we can use sched_clock() because we don't care much about precision * we only care that the average is bounded + * we don't mind a ~2.5% imprecision so <<10 instead of *1000 + * sk->sk_ll_usec is a u_int so this can't overflow */ -static inline u64 ll_end_time(struct sock *sk) +static inline u64 ll_sk_end_time(struct sock *sk) { - u64 end_time = ACCESS_ONCE(sk->sk_ll_usec); - - /* we don't mind a ~2.5% imprecision - * sk->sk_ll_usec is a u_int so this can't overflow - */ - end_time = (end_time << 10) + sched_clock(); + return ((u64)ACCESS_ONCE(sk->sk_ll_usec) << 10) + sched_clock(); +} - return end_time; +/* in poll/select we use the global sysctl_net_ll_poll value */ +static inline u64 ll_end_time(void) +{ + return ((u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10) + sched_clock(); } static inline bool sk_valid_ll(struct sock *sk) @@ -62,10 +64,13 @@ static inline bool can_poll_ll(u64 end_time) return !time_after64(sched_clock(), end_time); } +/* when used in sock_poll() nonblock is known at compile time to be true + * so the loop and end_time will be optimized out + */ static inline bool sk_poll_ll(struct sock *sk, int nonblock) 
{ + u64 end_time = nonblock ? 0 : ll_sk_end_time(sk); const struct net_device_ops *ops; - u64 end_time = ll_end_time(sk); struct napi_struct *napi; int rc = false; @@ -84,7 +89,6 @@ static inline bool sk_poll_ll(struct sock *sk, int nonblock) goto out; do { - rc = ops->ndo_ll_poll(napi); if (rc == LL_FLUSH_FAILED) @@ -95,8 +99,8 @@ static inline bool sk_poll_ll(struct sock *sk, int nonblock) NET_ADD_STATS_BH(sock_net(sk), LINUX_MIB_LOWLATENCYRXPACKETS, rc); - } while (skb_queue_empty(&sk->sk_receive_queue) - && can_poll_ll(end_time) && !nonblock); + } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && + can_poll_ll(end_time)); rc = !skb_queue_empty(&sk->sk_receive_queue); out: @@ -118,7 +122,12 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) #else /* CONFIG_NET_LL_RX_POLL */ -static inline u64 ll_end_time(struct sock *sk) +static inline u64 sk_ll_end_time(struct sock *sk) +{ + return 0; +} + +static inline u64 ll_end_time(void) { return 0; } diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h index 9ce7f44aebd2..4aee586979ca 100644 --- a/include/uapi/asm-generic/poll.h +++ b/include/uapi/asm-generic/poll.h @@ -30,6 +30,8 @@ #define POLLFREE 0x4000 /* currently only for epoll */ +#define POLL_LL 0x8000 + struct pollfd { int fd; short events; diff --git a/net/core/sock.c b/net/core/sock.c index 1e744b12fda3..b6c619f4d47b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2307,7 +2307,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) #ifdef CONFIG_NET_LL_RX_POLL sk->sk_napi_id = 0; - sk->sk_ll_usec = sysctl_net_ll_poll; + sk->sk_ll_usec = sysctl_net_ll_read; #endif /* diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 62702c2053de..afc677eadd93 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -306,6 +306,14 @@ static struct ctl_table net_core_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "low_latency_read", + .data = &sysctl_net_ll_read, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec + }, +# #endif #endif /* CONFIG_NET */ { diff --git a/net/socket.c b/net/socket.c index 3eec3f76b49c..4da14cbd49b6 100644 --- a/net/socket.c +++ b/net/socket.c @@ -107,6 +107,7 @@ #include #ifdef CONFIG_NET_LL_RX_POLL +unsigned int sysctl_net_ll_read __read_mostly; unsigned int sysctl_net_ll_poll __read_mostly; #endif @@ -1147,13 +1148,24 @@ EXPORT_SYMBOL(sock_create_lite); /* No kernel lock held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) { + unsigned int ll_flag = 0; struct socket *sock; /* * We can't return errors to poll, so it's either yes or no. */ sock = file->private_data; - return sock->ops->poll(file, sock, wait); + + if (sk_valid_ll(sock->sk)) { + /* this socket can poll_ll so tell the system call */ + ll_flag = POLL_LL; + + /* once, only if requested by syscall */ + if (wait && (wait->_key & POLL_LL)) + sk_poll_ll(sock->sk, 1); + } + + return ll_flag | sock->ops->poll(file, sock, wait); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) -- cgit v1.2.3 From 2be5c76794b0e570aa87b012df5ac864ce668a74 Mon Sep 17 00:00:00 2001 From: Vlad Yasevich Date: Tue, 25 Jun 2013 16:04:21 -0400 Subject: macvtap: Let TUNSETOFFLOAD actually controll offload features. When the user issues TUNSETOFFLOAD ioctl, macvtap does not do anything other then to verify arguments. This patch adds functionality to allow users to actually control offload features. 
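From user space the knob is the same TUNSETOFFLOAD ioctl that tun/tap already uses, now honoured by macvtap as well; a minimal sketch (assumes tap_fd is an already-open macvtap device node and shows only one typical flag combination):

#include <sys/ioctl.h>
#include <linux/if_tun.h>

/* request checksum offload plus TCPv4/TCPv6 segmentation offload */
static int enable_tap_offloads(int tap_fd)
{
	unsigned int offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	return ioctl(tap_fd, TUNSETOFFLOAD, offloads);
}
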
NETIF_F_GSO and NETIF_F_GRO are always on, but the rest of the features can be controlled. Signed-off-by: Vlad Yasevich Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 10 +++++++ drivers/net/macvtap.c | 65 +++++++++++++++++++++++++++++++++++++++++++++- include/linux/if_macvlan.h | 2 ++ 3 files changed, 76 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d811b06e2ccd..18373b6ae37d 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -638,6 +638,14 @@ static int macvlan_ethtool_get_settings(struct net_device *dev, return __ethtool_get_settings(vlan->lowerdev, cmd); } +static netdev_features_t macvlan_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + + return features & (vlan->set_features | ~MACVLAN_FEATURES); +} + static const struct ethtool_ops macvlan_ethtool_ops = { .get_link = ethtool_op_get_link, .get_settings = macvlan_ethtool_get_settings, @@ -651,6 +659,7 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_stop = macvlan_stop, .ndo_start_xmit = macvlan_start_xmit, .ndo_change_mtu = macvlan_change_mtu, + .ndo_fix_features = macvlan_fix_features, .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, .ndo_set_rx_mode = macvlan_set_mac_lists, @@ -791,6 +800,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, vlan->port = port; vlan->receive = receive; vlan->forward = forward; + vlan->set_features = MACVLAN_FEATURES; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index d7856a8f589a..7eab01975ed1 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -65,6 +65,9 @@ static struct cdev macvtap_cdev; static const struct proto_ops macvtap_socket_ops; +#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ + NETIF_F_TSO6 | NETIF_F_UFO) +#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) /* * RCU usage: * The macvtap_queue and the macvlan_dev are loosely coupled, the @@ -349,6 +352,11 @@ static int macvtap_newlink(struct net *src_net, struct macvlan_dev *vlan = netdev_priv(dev); INIT_LIST_HEAD(&vlan->queue_list); + /* Since macvlan supports all offloads by default, make + * tap support all offloads also. + */ + vlan->tap_features = TUN_OFFLOADS; + /* Don't put anything that may fail after macvlan_common_newlink * because we can't undo what it does. */ @@ -958,6 +966,58 @@ static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags) return ret; } +static int set_offload(struct macvtap_queue *q, unsigned long arg) +{ + struct macvlan_dev *vlan; + netdev_features_t features; + netdev_features_t feature_mask = 0; + + vlan = rtnl_dereference(q->vlan); + if (!vlan) + return -ENOLINK; + + features = vlan->dev->features; + + if (arg & TUN_F_CSUM) { + feature_mask = NETIF_F_HW_CSUM; + + if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) { + if (arg & TUN_F_TSO_ECN) + feature_mask |= NETIF_F_TSO_ECN; + if (arg & TUN_F_TSO4) + feature_mask |= NETIF_F_TSO; + if (arg & TUN_F_TSO6) + feature_mask |= NETIF_F_TSO6; + } + + if (arg & TUN_F_UFO) + feature_mask |= NETIF_F_UFO; + } + + /* tun/tap driver inverts the usage for TSO offloads, where + * setting the TSO bit means that the userspace wants to + * accept TSO frames and turning it off means that user space + * does not support TSO. + * For macvtap, we have to invert it to mean the same thing. 
+ * When user space turns off TSO, we turn off GSO/LRO so that + * user-space will not receive TSO frames. + */ + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) + features |= RX_OFFLOADS; + else + features &= ~RX_OFFLOADS; + + /* tap_features are the same as features on tun/tap and + * reflect user expectations. + */ + vlan->tap_features = vlan->dev->features & + (feature_mask | ~TUN_OFFLOADS); + vlan->set_features = features; + netdev_update_features(vlan->dev); + + return 0; +} + /* * provide compatibility with generic tun/tap interface */ @@ -1050,7 +1110,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, got enabled for forwarded frames */ if (!(q->flags & IFF_VNET_HDR)) return -EINVAL; - return 0; + rtnl_lock(); + ret = set_offload(q, arg); + rtnl_unlock(); + return ret; default: return -EINVAL; diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index f49a9f66c3d9..ddd33fd5904d 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -65,6 +65,7 @@ struct macvlan_dev { DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); + netdev_features_t set_features; enum macvlan_mode mode; u16 flags; int (*receive)(struct sk_buff *skb); @@ -75,6 +76,7 @@ struct macvlan_dev { struct list_head queue_list; int numvtaps; int numqueues; + netdev_features_t tap_features; int minor; }; -- cgit v1.2.3 From bba54de5bdd107d3841b560f1a9cb0ed06e79533 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sun, 16 Jun 2013 09:09:36 +0300 Subject: ipvs: provide iph to schedulers Before now the schedulers needed access only to IP addresses and it was easy to get them from skb by using ip_vs_fill_iph_addr_only. New changes for the SH scheduler will need the protocol and ports which is difficult to get from skb for the IPv6 case. As we have all the data in the iph structure, to avoid the same slow lookups provide the iph to schedulers. Signed-off-by: Julian Anastasov Acked-by: Hans Schillstrom Signed-off-by: Simon Horman --- include/net/ip_vs.h | 28 ++-------------------------- net/netfilter/ipvs/ip_vs_core.c | 4 ++-- net/netfilter/ipvs/ip_vs_dh.c | 10 ++++------ net/netfilter/ipvs/ip_vs_lblc.c | 12 +++++------- net/netfilter/ipvs/ip_vs_lblcr.c | 12 +++++------- net/netfilter/ipvs/ip_vs_lc.c | 3 ++- net/netfilter/ipvs/ip_vs_nq.c | 3 ++- net/netfilter/ipvs/ip_vs_rr.c | 3 ++- net/netfilter/ipvs/ip_vs_sed.c | 3 ++- net/netfilter/ipvs/ip_vs_sh.c | 10 ++++------ net/netfilter/ipvs/ip_vs_wlc.c | 3 ++- net/netfilter/ipvs/ip_vs_wrr.c | 3 ++- 12 files changed, 34 insertions(+), 60 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 4405886980c7..f5faf859876e 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -197,31 +197,6 @@ ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr) } } -/* This function is a faster version of ip_vs_fill_iph_skb(). - * Where we only populate {s,d}addr (and avoid calling ipv6_find_hdr()). - * This is used by the some of the ip_vs_*_schedule() functions. 
- * (Mostly done to avoid ABI breakage of external schedulers) - */ -static inline void -ip_vs_fill_iph_addr_only(int af, const struct sk_buff *skb, - struct ip_vs_iphdr *iphdr) -{ -#ifdef CONFIG_IP_VS_IPV6 - if (af == AF_INET6) { - const struct ipv6hdr *iph = - (struct ipv6hdr *)skb_network_header(skb); - iphdr->saddr.in6 = iph->saddr; - iphdr->daddr.in6 = iph->daddr; - } else -#endif - { - const struct iphdr *iph = - (struct iphdr *)skb_network_header(skb); - iphdr->saddr.ip = iph->saddr; - iphdr->daddr.ip = iph->daddr; - } -} - static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst, const union nf_inet_addr *src) { @@ -814,7 +789,8 @@ struct ip_vs_scheduler { /* selecting a server from the given service */ struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc, - const struct sk_buff *skb); + const struct sk_buff *skb, + struct ip_vs_iphdr *iph); }; /* The persistence engine object */ diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 05565d2b3a61..e9b0330f220d 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -305,7 +305,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, * return *ignored=0 i.e. ICMP and NF_DROP */ sched = rcu_dereference(svc->scheduler); - dest = sched->schedule(svc, skb); + dest = sched->schedule(svc, skb, iph); if (!dest) { IP_VS_DBG(1, "p-schedule: no dest found.\n"); kfree(param.pe_data); @@ -452,7 +452,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, } sched = rcu_dereference(svc->scheduler); - dest = sched->schedule(svc, skb); + dest = sched->schedule(svc, skb, iph); if (dest == NULL) { IP_VS_DBG(1, "Schedule: no dest found.\n"); return NULL; diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index ccab120df45e..c3b84546ea9e 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c @@ -214,18 +214,16 @@ static inline int is_overloaded(struct ip_vs_dest *dest) * Destination hashing scheduling */ static struct ip_vs_dest * -ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest; struct ip_vs_dh_state *s; - struct ip_vs_iphdr iph; - - ip_vs_fill_iph_addr_only(svc->af, skb, &iph); IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); s = (struct ip_vs_dh_state *) svc->sched_data; - dest = ip_vs_dh_get(svc->af, s, &iph.daddr); + dest = ip_vs_dh_get(svc->af, s, &iph->daddr); if (!dest || !(dest->flags & IP_VS_DEST_F_AVAILABLE) || atomic_read(&dest->weight) <= 0 @@ -235,7 +233,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) } IP_VS_DBG_BUF(6, "DH: destination IP address %s --> server %s:%d\n", - IP_VS_DBG_ADDR(svc->af, &iph.daddr), + IP_VS_DBG_ADDR(svc->af, &iph->daddr), IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 44595b8ae37f..1383b0eadc0e 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -487,19 +487,17 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc) * Locality-Based (weighted) Least-Connection scheduling */ static struct ip_vs_dest * -ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct ip_vs_lblc_table *tbl = svc->sched_data; - struct ip_vs_iphdr iph; struct ip_vs_dest 
*dest = NULL; struct ip_vs_lblc_entry *en; - ip_vs_fill_iph_addr_only(svc->af, skb, &iph); - IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); /* First look in our cache */ - en = ip_vs_lblc_get(svc->af, tbl, &iph.daddr); + en = ip_vs_lblc_get(svc->af, tbl, &iph->daddr); if (en) { /* We only hold a read lock, but this is atomic */ en->lastuse = jiffies; @@ -529,12 +527,12 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) /* If we fail to create a cache entry, we'll just use the valid dest */ spin_lock_bh(&svc->sched_lock); if (!tbl->dead) - ip_vs_lblc_new(tbl, &iph.daddr, dest); + ip_vs_lblc_new(tbl, &iph->daddr, dest); spin_unlock_bh(&svc->sched_lock); out: IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n", - IP_VS_DBG_ADDR(svc->af, &iph.daddr), + IP_VS_DBG_ADDR(svc->af, &iph->daddr), IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); return dest; diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 876937db0bf4..3cd85b2fc67c 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -655,19 +655,17 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc) * Locality-Based (weighted) Least-Connection scheduling */ static struct ip_vs_dest * -ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct ip_vs_lblcr_table *tbl = svc->sched_data; - struct ip_vs_iphdr iph; struct ip_vs_dest *dest; struct ip_vs_lblcr_entry *en; - ip_vs_fill_iph_addr_only(svc->af, skb, &iph); - IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); /* First look in our cache */ - en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr); + en = ip_vs_lblcr_get(svc->af, tbl, &iph->daddr); if (en) { en->lastuse = jiffies; @@ -718,12 +716,12 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) /* If we fail to create a cache entry, we'll just use the valid dest */ spin_lock_bh(&svc->sched_lock); if (!tbl->dead) - ip_vs_lblcr_new(tbl, &iph.daddr, dest); + ip_vs_lblcr_new(tbl, &iph->daddr, dest); spin_unlock_bh(&svc->sched_lock); out: IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n", - IP_VS_DBG_ADDR(svc->af, &iph.daddr), + IP_VS_DBG_ADDR(svc->af, &iph->daddr), IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); return dest; diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c index 5128e338a749..2bdcb1cf2127 100644 --- a/net/netfilter/ipvs/ip_vs_lc.c +++ b/net/netfilter/ipvs/ip_vs_lc.c @@ -26,7 +26,8 @@ * Least Connection scheduling */ static struct ip_vs_dest * -ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest, *least = NULL; unsigned int loh = 0, doh; diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c index 646cfd4baa73..d8d9860934fe 100644 --- a/net/netfilter/ipvs/ip_vs_nq.c +++ b/net/netfilter/ipvs/ip_vs_nq.c @@ -55,7 +55,8 @@ ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) * Weighted Least Connection scheduling */ static struct ip_vs_dest * -ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest, *least = NULL; unsigned int loh = 0, doh; diff --git a/net/netfilter/ipvs/ip_vs_rr.c 
b/net/netfilter/ipvs/ip_vs_rr.c index c35986c793d9..176b87c35e34 100644 --- a/net/netfilter/ipvs/ip_vs_rr.c +++ b/net/netfilter/ipvs/ip_vs_rr.c @@ -55,7 +55,8 @@ static int ip_vs_rr_del_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest) * Round-Robin Scheduling */ static struct ip_vs_dest * -ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct list_head *p; struct ip_vs_dest *dest, *last; diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c index f3205925359a..a5284cc3d882 100644 --- a/net/netfilter/ipvs/ip_vs_sed.c +++ b/net/netfilter/ipvs/ip_vs_sed.c @@ -59,7 +59,8 @@ ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) * Weighted Least Connection scheduling */ static struct ip_vs_dest * -ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest, *least; unsigned int loh, doh; diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index a65edfe4b16c..e0d5d1653566 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c @@ -227,18 +227,16 @@ static inline int is_overloaded(struct ip_vs_dest *dest) * Source Hashing scheduling */ static struct ip_vs_dest * -ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest; struct ip_vs_sh_state *s; - struct ip_vs_iphdr iph; - - ip_vs_fill_iph_addr_only(svc->af, skb, &iph); IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n"); s = (struct ip_vs_sh_state *) svc->sched_data; - dest = ip_vs_sh_get(svc->af, s, &iph.saddr); + dest = ip_vs_sh_get(svc->af, s, &iph->saddr); if (!dest || !(dest->flags & IP_VS_DEST_F_AVAILABLE) || atomic_read(&dest->weight) <= 0 @@ -248,7 +246,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) } IP_VS_DBG_BUF(6, "SH: source IP address %s --> server %s:%d\n", - IP_VS_DBG_ADDR(svc->af, &iph.saddr), + IP_VS_DBG_ADDR(svc->af, &iph->saddr), IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c index c60a81c4ce9a..6dc1fa128840 100644 --- a/net/netfilter/ipvs/ip_vs_wlc.c +++ b/net/netfilter/ipvs/ip_vs_wlc.c @@ -31,7 +31,8 @@ * Weighted Least Connection scheduling */ static struct ip_vs_dest * -ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest, *least; unsigned int loh, doh; diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c index 0e68555bceb9..0546cd572d6b 100644 --- a/net/netfilter/ipvs/ip_vs_wrr.c +++ b/net/netfilter/ipvs/ip_vs_wrr.c @@ -162,7 +162,8 @@ static int ip_vs_wrr_dest_changed(struct ip_vs_service *svc, * Weighted Round-Robin Scheduling */ static struct ip_vs_dest * -ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) +ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest, *last, *stop = NULL; struct ip_vs_wrr_mark *mark = svc->sched_data; -- cgit v1.2.3 From c6c96c188336b2b95d5f14facd101f1e4165a9d3 Mon Sep 17 00:00:00 2001 From: Alexander Frolkin Date: Thu, 13 Jun 2013 08:56:15 
+0100 Subject: ipvs: sloppy TCP and SCTP This adds support for sloppy TCP and SCTP modes to IPVS. When enabled (sysctls net.ipv4.vs.sloppy_tcp and net.ipv4.vs.sloppy_sctp), allows IPVS to create connection state on any packet, not just a TCP SYN (or SCTP INIT). This allows connections to fail over from one IPVS director to another mid-flight. Signed-off-by: Alexander Frolkin Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman --- include/net/ip_vs.h | 24 ++++++++++++++++++++++++ net/netfilter/ipvs/ip_vs_ctl.c | 14 ++++++++++++++ net/netfilter/ipvs/ip_vs_proto_sctp.c | 18 ++++++++++-------- net/netfilter/ipvs/ip_vs_proto_tcp.c | 14 ++++++++------ 4 files changed, 56 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index f5faf859876e..95860dfdfbe3 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -978,6 +978,8 @@ struct netns_ipvs { int sysctl_sync_sock_size; int sysctl_cache_bypass; int sysctl_expire_nodest_conn; + int sysctl_sloppy_tcp; + int sysctl_sloppy_sctp; int sysctl_expire_quiescent_template; int sysctl_sync_threshold[2]; unsigned int sysctl_sync_refresh_period; @@ -1020,6 +1022,8 @@ struct netns_ipvs { #define DEFAULT_SYNC_THRESHOLD 3 #define DEFAULT_SYNC_PERIOD 50 #define DEFAULT_SYNC_VER 1 +#define DEFAULT_SLOPPY_TCP 0 +#define DEFAULT_SLOPPY_SCTP 0 #define DEFAULT_SYNC_REFRESH_PERIOD (0U * HZ) #define DEFAULT_SYNC_RETRIES 0 #define IPVS_SYNC_WAKEUP_RATE 8 @@ -1056,6 +1060,16 @@ static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) return ipvs->sysctl_sync_ver; } +static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sloppy_tcp; +} + +static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sloppy_sctp; +} + static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) { return ACCESS_ONCE(ipvs->sysctl_sync_ports); @@ -1109,6 +1123,16 @@ static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) return DEFAULT_SYNC_VER; } +static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs) +{ + return DEFAULT_SLOPPY_TCP; +} + +static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs) +{ + return DEFAULT_SLOPPY_SCTP; +} + static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) { return 1; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 47e510819f54..da035fc01eb2 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1738,6 +1738,18 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "sloppy_tcp", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sloppy_sctp", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "expire_quiescent_template", .maxlen = sizeof(int), @@ -3723,6 +3735,8 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net) tbl[idx++].data = &ipvs->sysctl_sync_sock_size; tbl[idx++].data = &ipvs->sysctl_cache_bypass; tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn; + tbl[idx++].data = &ipvs->sysctl_sloppy_tcp; + tbl[idx++].data = &ipvs->sysctl_sloppy_sctp; tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template; ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD; ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 86464881cd20..df29d6417043 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ 
b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -15,6 +15,7 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, { struct net *net; struct ip_vs_service *svc; + struct netns_ipvs *ipvs; sctp_chunkhdr_t _schunkh, *sch; sctp_sctphdr_t *sh, _sctph; @@ -27,13 +28,14 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, if (sch == NULL) return 0; net = skb_net(skb); + ipvs = net_ipvs(net); rcu_read_lock(); - if ((sch->type == SCTP_CID_INIT) && + if ((sch->type == SCTP_CID_INIT || sysctl_sloppy_sctp(ipvs)) && (svc = ip_vs_service_find(net, af, skb->mark, iph->protocol, &iph->daddr, sh->dest))) { int ignored; - if (ip_vs_todrop(net_ipvs(net))) { + if (ip_vs_todrop(ipvs)) { /* * It seems that we are very loaded. * We have to drop this packet :( @@ -232,21 +234,21 @@ static struct ipvs_sctp_nextstate * STATE : IP_VS_SCTP_S_NONE */ /*next state *//*event */ - {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ }, + {{IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_CLI */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ }, {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, + {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, + {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, + {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ }, + {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, + {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }, diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index 50a15944c6c1..e3a697234a98 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -39,6 +39,7 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, struct net *net; struct ip_vs_service *svc; struct tcphdr _tcph, *th; + struct netns_ipvs *ipvs; th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph); if (th == NULL) { @@ -46,14 +47,15 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, return 0; } net = skb_net(skb); + ipvs = net_ipvs(net); /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ rcu_read_lock(); - if (th->syn && + if ((th->syn || sysctl_sloppy_tcp(ipvs)) && !th->rst && (svc = ip_vs_service_find(net, af, skb->mark, iph->protocol, &iph->daddr, th->dest))) { int ignored; - if (ip_vs_todrop(net_ipvs(net))) { + if (ip_vs_todrop(ipvs)) { /* * It seems that we are very loaded. 
* We have to drop this packet :( @@ -401,7 +403,7 @@ static struct tcp_states_t tcp_states [] = { /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }}, /*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }}, -/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, +/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }}, /* OUTPUT */ @@ -415,7 +417,7 @@ static struct tcp_states_t tcp_states [] = { /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }}, /*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }}, -/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, +/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, }; @@ -424,7 +426,7 @@ static struct tcp_states_t tcp_states_dos [] = { /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }}, /*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }}, -/*ack*/ {{sCL, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }}, +/*ack*/ {{sES, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, /* OUTPUT */ @@ -438,7 +440,7 @@ static struct tcp_states_t tcp_states_dos [] = { /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA }}, /*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }}, -/*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, +/*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, }; -- cgit v1.2.3 From 61e7c420b4b2a797ac209106ba743ab6ebe984d8 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Tue, 18 Jun 2013 10:08:07 +0300 Subject: ipvs: replace the SCTP state machine Convert the SCTP state table so that it is more readable. Change the states to follow the diagram in RFC 2960 and add more states suitable for a middle box. Still, such a change in states introduces an incompatibility if some systems in a sync setup include this change and others do not. With this change we also have proper transitions in INPUT-ONLY mode (DR/TUN), where we see packets only from the client. Now we no longer switch to the 10-second CLOSED state when we should stay in the ESTABLISHED state. The short names for the states are used because we have 16 characters of space in ipvsadm and an 11-character limit in the connection list format; the latter is a consequence of the TCP implementation, where the longest state name is ESTABLISHED.
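To make the new layout concrete before the diff: the flat table introduced below is indexed by packet direction, chunk-derived event and current connection state, so computing a transition reduces to a single array lookup (a minimal sketch; the helper name sctp_next_state is hypothetical and used only for illustration):

	/* next state = sctp_states[direction][event][current state];
	 * direction is one of the ip_vs INPUT / OUTPUT / INPUT-ONLY indexes
	 */
	static inline int sctp_next_state(int direction, int event, int cur_state)
	{
		return sctp_states[direction][event][cur_state];
	}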
Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman --- include/net/ip_vs.h | 21 +- net/netfilter/ipvs/ip_vs_proto_sctp.c | 854 ++++++---------------------------- net/netfilter/ipvs/ip_vs_sync.c | 7 +- 3 files changed, 168 insertions(+), 714 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 95860dfdfbe3..e667df171003 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -380,17 +380,18 @@ enum { */ enum ip_vs_sctp_states { IP_VS_SCTP_S_NONE, - IP_VS_SCTP_S_INIT_CLI, - IP_VS_SCTP_S_INIT_SER, - IP_VS_SCTP_S_INIT_ACK_CLI, - IP_VS_SCTP_S_INIT_ACK_SER, - IP_VS_SCTP_S_ECHO_CLI, - IP_VS_SCTP_S_ECHO_SER, + IP_VS_SCTP_S_INIT1, + IP_VS_SCTP_S_INIT, + IP_VS_SCTP_S_COOKIE_SENT, + IP_VS_SCTP_S_COOKIE_REPLIED, + IP_VS_SCTP_S_COOKIE_WAIT, + IP_VS_SCTP_S_COOKIE, + IP_VS_SCTP_S_COOKIE_ECHOED, IP_VS_SCTP_S_ESTABLISHED, - IP_VS_SCTP_S_SHUT_CLI, - IP_VS_SCTP_S_SHUT_SER, - IP_VS_SCTP_S_SHUT_ACK_CLI, - IP_VS_SCTP_S_SHUT_ACK_SER, + IP_VS_SCTP_S_SHUTDOWN_SENT, + IP_VS_SCTP_S_SHUTDOWN_RECEIVED, + IP_VS_SCTP_S_SHUTDOWN_ACK_SENT, + IP_VS_SCTP_S_REJECTED, IP_VS_SCTP_S_CLOSED, IP_VS_SCTP_S_LAST }; diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index df29d6417043..3c0da8728036 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -185,710 +185,159 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) return 1; } -struct ipvs_sctp_nextstate { - int next_state; -}; enum ipvs_sctp_event_t { - IP_VS_SCTP_EVE_DATA_CLI, - IP_VS_SCTP_EVE_DATA_SER, - IP_VS_SCTP_EVE_INIT_CLI, - IP_VS_SCTP_EVE_INIT_SER, - IP_VS_SCTP_EVE_INIT_ACK_CLI, - IP_VS_SCTP_EVE_INIT_ACK_SER, - IP_VS_SCTP_EVE_COOKIE_ECHO_CLI, - IP_VS_SCTP_EVE_COOKIE_ECHO_SER, - IP_VS_SCTP_EVE_COOKIE_ACK_CLI, - IP_VS_SCTP_EVE_COOKIE_ACK_SER, - IP_VS_SCTP_EVE_ABORT_CLI, - IP_VS_SCTP_EVE__ABORT_SER, - IP_VS_SCTP_EVE_SHUT_CLI, - IP_VS_SCTP_EVE_SHUT_SER, - IP_VS_SCTP_EVE_SHUT_ACK_CLI, - IP_VS_SCTP_EVE_SHUT_ACK_SER, - IP_VS_SCTP_EVE_SHUT_COM_CLI, - IP_VS_SCTP_EVE_SHUT_COM_SER, - IP_VS_SCTP_EVE_LAST + IP_VS_SCTP_DATA = 0, /* DATA, SACK, HEARTBEATs */ + IP_VS_SCTP_INIT, + IP_VS_SCTP_INIT_ACK, + IP_VS_SCTP_COOKIE_ECHO, + IP_VS_SCTP_COOKIE_ACK, + IP_VS_SCTP_SHUTDOWN, + IP_VS_SCTP_SHUTDOWN_ACK, + IP_VS_SCTP_SHUTDOWN_COMPLETE, + IP_VS_SCTP_ERROR, + IP_VS_SCTP_ABORT, + IP_VS_SCTP_EVENT_LAST }; -static enum ipvs_sctp_event_t sctp_events[256] = { - IP_VS_SCTP_EVE_DATA_CLI, - IP_VS_SCTP_EVE_INIT_CLI, - IP_VS_SCTP_EVE_INIT_ACK_CLI, - IP_VS_SCTP_EVE_DATA_CLI, - IP_VS_SCTP_EVE_DATA_CLI, - IP_VS_SCTP_EVE_DATA_CLI, - IP_VS_SCTP_EVE_ABORT_CLI, - IP_VS_SCTP_EVE_SHUT_CLI, - IP_VS_SCTP_EVE_SHUT_ACK_CLI, - IP_VS_SCTP_EVE_DATA_CLI, - IP_VS_SCTP_EVE_COOKIE_ECHO_CLI, - IP_VS_SCTP_EVE_COOKIE_ACK_CLI, - IP_VS_SCTP_EVE_DATA_CLI, - IP_VS_SCTP_EVE_DATA_CLI, - IP_VS_SCTP_EVE_SHUT_COM_CLI, +/* RFC 2960, 3.2 Chunk Field Descriptions */ +static __u8 sctp_events[] = { + [SCTP_CID_DATA] = IP_VS_SCTP_DATA, + [SCTP_CID_INIT] = IP_VS_SCTP_INIT, + [SCTP_CID_INIT_ACK] = IP_VS_SCTP_INIT_ACK, + [SCTP_CID_SACK] = IP_VS_SCTP_DATA, + [SCTP_CID_HEARTBEAT] = IP_VS_SCTP_DATA, + [SCTP_CID_HEARTBEAT_ACK] = IP_VS_SCTP_DATA, + [SCTP_CID_ABORT] = IP_VS_SCTP_ABORT, + [SCTP_CID_SHUTDOWN] = IP_VS_SCTP_SHUTDOWN, + [SCTP_CID_SHUTDOWN_ACK] = IP_VS_SCTP_SHUTDOWN_ACK, + [SCTP_CID_ERROR] = IP_VS_SCTP_ERROR, + [SCTP_CID_COOKIE_ECHO] = IP_VS_SCTP_COOKIE_ECHO, + [SCTP_CID_COOKIE_ACK] = IP_VS_SCTP_COOKIE_ACK, + [SCTP_CID_ECN_ECNE] = IP_VS_SCTP_DATA, + [SCTP_CID_ECN_CWR] = 
IP_VS_SCTP_DATA, + [SCTP_CID_SHUTDOWN_COMPLETE] = IP_VS_SCTP_SHUTDOWN_COMPLETE, }; -static struct ipvs_sctp_nextstate - sctp_states_table[IP_VS_SCTP_S_LAST][IP_VS_SCTP_EVE_LAST] = { - /* - * STATE : IP_VS_SCTP_S_NONE - */ - /*next state *//*event */ - {{IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ }, - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ }, - {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }, - }, - /* - * STATE : IP_VS_SCTP_S_INIT_CLI - * Cient sent INIT and is waiting for reply from server(In ECHO_WAIT) - */ - {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ }, - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ECHO_CLI */ }, - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_ECHO_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - /* - * State : IP_VS_SCTP_S_INIT_SER - * Server sent INIT and waiting for INIT ACK from the client - */ - {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ }, - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* 
IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - /* - * State : IP_VS_SCTP_S_INIT_ACK_CLI - * Client sent INIT ACK and waiting for ECHO from the server - */ - {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ }, - /* - * We have got an INIT from client. From the spec.“Upon receipt of - * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with - * an INIT ACK using the same parameters it sent in its original - * INIT chunk (including its Initiate Tag, unchanged”). - */ - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - /* - * INIT_ACK has been resent by the client, let us stay is in - * the same state - */ - {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - /* - * INIT_ACK sent by the server, close the connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - /* - * ECHO by client, it should not happen, close the connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - /* - * ECHO by server, this is what we are expecting, move to ECHO_SER - */ - {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - /* - * COOKIE ACK from client, it should not happen, close the connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - /* - * Unexpected COOKIE ACK from server, staty in the same state - */ - {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - /* - * State : IP_VS_SCTP_S_INIT_ACK_SER - * Server sent INIT ACK and waiting for ECHO from the client - */ - {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ }, - /* - * We have got an INIT from client. From the spec.“Upon receipt of - * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with - * an INIT ACK using the same parameters it sent in its original - * INIT chunk (including its Initiate Tag, unchanged”). - */ - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - /* - * Unexpected INIT_ACK by the client, let us close the connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - /* - * INIT_ACK resent by the server, let us move to same state - */ - {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - /* - * Client send the ECHO, this is what we are expecting, - * move to ECHO_CLI - */ - {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - /* - * ECHO received from the server, Not sure what to do, - * let us close it - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - /* - * COOKIE ACK from client, let us stay in the same state - */ - {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - /* - * COOKIE ACK from server, hmm... this should not happen, lets close - * the connection. 
- */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - /* - * State : IP_VS_SCTP_S_ECHO_CLI - * Cient sent ECHO and waiting COOKEI ACK from the Server - */ - {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ }, - /* - * We have got an INIT from client. From the spec.“Upon receipt of - * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with - * an INIT ACK using the same parameters it sent in its original - * INIT chunk (including its Initiate Tag, unchanged”). - */ - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - /* - * INIT_ACK has been by the client, let us close the connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - /* - * INIT_ACK sent by the server, Unexpected INIT ACK, spec says, - * “If an INIT ACK is received by an endpoint in any state other - * than the COOKIE-WAIT state, the endpoint should discard the - * INIT ACK chunk”. Stay in the same state - */ - {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - /* - * Client resent the ECHO, let us stay in the same state - */ - {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - /* - * ECHO received from the server, Not sure what to do, - * let us close it - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - /* - * COOKIE ACK from client, this shoud not happen, let's close the - * connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - /* - * COOKIE ACK from server, this is what we are awaiting,lets move to - * ESTABLISHED. - */ - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - /* - * State : IP_VS_SCTP_S_ECHO_SER - * Server sent ECHO and waiting COOKEI ACK from the client - */ - {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ }, - /* - * We have got an INIT from client. From the spec.“Upon receipt of - * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with - * an INIT ACK using the same parameters it sent in its original - * INIT chunk (including its Initiate Tag, unchanged”). - */ - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - /* - * INIT_ACK sent by the server, Unexpected INIT ACK, spec says, - * “If an INIT ACK is received by an endpoint in any state other - * than the COOKIE-WAIT state, the endpoint should discard the - * INIT ACK chunk”. 
Stay in the same state - */ - {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - /* - * INIT_ACK has been by the server, let us close the connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - /* - * Client sent the ECHO, not sure what to do, let's close the - * connection. - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - /* - * ECHO resent by the server, stay in the same state - */ - {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - /* - * COOKIE ACK from client, this is what we are expecting, let's move - * to ESTABLISHED. - */ - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - /* - * COOKIE ACK from server, this should not happen, lets close the - * connection. - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - /* - * State : IP_VS_SCTP_S_ESTABLISHED - * Association established - */ - {{IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_SER */ }, - /* - * We have got an INIT from client. From the spec.“Upon receipt of - * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with - * an INIT ACK using the same parameters it sent in its original - * INIT chunk (including its Initiate Tag, unchanged”). - */ - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - /* - * INIT_ACK sent by the server, Unexpected INIT ACK, spec says, - * “If an INIT ACK is received by an endpoint in any state other - * than the COOKIE-WAIT state, the endpoint should discard the - * INIT ACK chunk”. Stay in the same state - */ - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - /* - * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the - * peer and peer shall move to the ESTABISHED. if it doesn't handle - * it will send ERROR chunk. 
So, stay in the same state - */ - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - /* - * COOKIE ACK from client, not sure what to do stay in the same state - */ - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - /* - * SHUTDOWN from the client, move to SHUDDOWN_CLI - */ - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - /* - * SHUTDOWN from the server, move to SHUTDOWN_SER - */ - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ }, - /* - * client sent SHUDTDOWN_ACK, this should not happen, let's close - * the connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - /* - * State : IP_VS_SCTP_S_SHUT_CLI - * SHUTDOWN sent from the client, waitinf for SHUT ACK from the server - */ - /* - * We received the data chuck, keep the state unchanged. I assume - * that still data chuncks can be received by both the peers in - * SHUDOWN state - */ - - {{IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_SER */ }, - /* - * We have got an INIT from client. From the spec.“Upon receipt of - * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with - * an INIT ACK using the same parameters it sent in its original - * INIT chunk (including its Initiate Tag, unchanged”). - */ - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - /* - * INIT_ACK sent by the server, Unexpected INIT ACK, spec says, - * “If an INIT ACK is received by an endpoint in any state other - * than the COOKIE-WAIT state, the endpoint should discard the - * INIT ACK chunk”. Stay in the same state - */ - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - /* - * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the - * peer and peer shall move to the ESTABISHED. if it doesn't handle - * it will send ERROR chunk. 
So, stay in the same state - */ - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - /* - * COOKIE ACK from client, not sure what to do stay in the same state - */ - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - /* - * SHUTDOWN resent from the client, move to SHUDDOWN_CLI - */ - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - /* - * SHUTDOWN from the server, move to SHUTDOWN_SER - */ - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ }, - /* - * client sent SHUDTDOWN_ACK, this should not happen, let's close - * the connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - /* - * Server sent SHUTDOWN ACK, this is what we are expecting, let's move - * to SHUDOWN_ACK_SER - */ - {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - /* - * SHUTDOWN COM from client, this should not happen, let's close the - * connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - /* - * State : IP_VS_SCTP_S_SHUT_SER - * SHUTDOWN sent from the server, waitinf for SHUTDOWN ACK from client - */ - /* - * We received the data chuck, keep the state unchanged. I assume - * that still data chuncks can be received by both the peers in - * SHUDOWN state - */ - - {{IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_SER */ }, - /* - * We have got an INIT from client. From the spec.“Upon receipt of - * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with - * an INIT ACK using the same parameters it sent in its original - * INIT chunk (including its Initiate Tag, unchanged”). - */ - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - /* - * INIT_ACK sent by the server, Unexpected INIT ACK, spec says, - * “If an INIT ACK is received by an endpoint in any state other - * than the COOKIE-WAIT state, the endpoint should discard the - * INIT ACK chunk”. Stay in the same state - */ - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - /* - * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the - * peer and peer shall move to the ESTABISHED. if it doesn't handle - * it will send ERROR chunk. 
So, stay in the same state - */ - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - /* - * COOKIE ACK from client, not sure what to do stay in the same state - */ - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - /* - * SHUTDOWN resent from the client, move to SHUDDOWN_CLI - */ - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - /* - * SHUTDOWN resent from the server, move to SHUTDOWN_SER - */ - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ }, - /* - * client sent SHUDTDOWN_ACK, this is what we are expecting, let's - * move to SHUT_ACK_CLI - */ - {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - /* - * Server sent SHUTDOWN ACK, this should not happen, let's close the - * connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - /* - * SHUTDOWN COM from client, this should not happen, let's close the - * connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - - /* - * State : IP_VS_SCTP_S_SHUT_ACK_CLI - * SHUTDOWN ACK from the client, awaiting for SHUTDOWN COM from server - */ - /* - * We received the data chuck, keep the state unchanged. I assume - * that still data chuncks can be received by both the peers in - * SHUDOWN state - */ - - {{IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_SER */ }, - /* - * We have got an INIT from client. From the spec.“Upon receipt of - * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with - * an INIT ACK using the same parameters it sent in its original - * INIT chunk (including its Initiate Tag, unchanged”). - */ - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - /* - * INIT_ACK sent by the server, Unexpected INIT ACK, spec says, - * “If an INIT ACK is received by an endpoint in any state other - * than the COOKIE-WAIT state, the endpoint should discard the - * INIT ACK chunk”. Stay in the same state - */ - {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - /* - * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the - * peer and peer shall move to the ESTABISHED. if it doesn't handle - * it will send ERROR chunk. 
So, stay in the same state - */ - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - /* - * COOKIE ACK from client, not sure what to do stay in the same state - */ - {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - /* - * SHUTDOWN sent from the client, move to SHUDDOWN_CLI - */ - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - /* - * SHUTDOWN sent from the server, move to SHUTDOWN_SER - */ - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ }, - /* - * client resent SHUDTDOWN_ACK, let's stay in the same state - */ - {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - /* - * Server sent SHUTDOWN ACK, this should not happen, let's close the - * connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - /* - * SHUTDOWN COM from client, this should not happen, let's close the - * connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - /* - * SHUTDOWN COMPLETE from server this is what we are expecting. - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - - /* - * State : IP_VS_SCTP_S_SHUT_ACK_SER - * SHUTDOWN ACK from the server, awaiting for SHUTDOWN COM from client - */ - /* - * We received the data chuck, keep the state unchanged. I assume - * that still data chuncks can be received by both the peers in - * SHUDOWN state - */ +/* SCTP States: + * See RFC 2960, 4. SCTP Association State Diagram + * + * New states (not in diagram): + * - INIT1 state: use shorter timeout for dropped INIT packets + * - REJECTED state: use shorter timeout if INIT is rejected with ABORT + * - INIT, COOKIE_SENT, COOKIE_REPLIED, COOKIE states: for better debugging + * + * The states are as seen in real server. In the diagram, INIT1, INIT, + * COOKIE_SENT and COOKIE_REPLIED processing happens in CLOSED state. + * + * States as per packets from client (C) and server (S): + * + * Setup of client connection: + * IP_VS_SCTP_S_INIT1: First C:INIT sent, wait for S:INIT-ACK + * IP_VS_SCTP_S_INIT: Next C:INIT sent, wait for S:INIT-ACK + * IP_VS_SCTP_S_COOKIE_SENT: S:INIT-ACK sent, wait for C:COOKIE-ECHO + * IP_VS_SCTP_S_COOKIE_REPLIED: C:COOKIE-ECHO sent, wait for S:COOKIE-ACK + * + * Setup of server connection: + * IP_VS_SCTP_S_COOKIE_WAIT: S:INIT sent, wait for C:INIT-ACK + * IP_VS_SCTP_S_COOKIE: C:INIT-ACK sent, wait for S:COOKIE-ECHO + * IP_VS_SCTP_S_COOKIE_ECHOED: S:COOKIE-ECHO sent, wait for C:COOKIE-ACK + */ - {{IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_SER */ }, - /* - * We have got an INIT from client. From the spec.“Upon receipt of - * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with - * an INIT ACK using the same parameters it sent in its original - * INIT chunk (including its Initiate Tag, unchanged”). - */ - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - /* - * INIT_ACK sent by the server, Unexpected INIT ACK, spec says, - * “If an INIT ACK is received by an endpoint in any state other - * than the COOKIE-WAIT state, the endpoint should discard the - * INIT ACK chunk”. 
Stay in the same state - */ - {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - /* - * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the - * peer and peer shall move to the ESTABISHED. if it doesn't handle - * it will send ERROR chunk. So, stay in the same state - */ - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - /* - * COOKIE ACK from client, not sure what to do stay in the same state - */ - {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - /* - * SHUTDOWN sent from the client, move to SHUDDOWN_CLI - */ - {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - /* - * SHUTDOWN sent from the server, move to SHUTDOWN_SER - */ - {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ }, - /* - * client sent SHUDTDOWN_ACK, this should not happen let's close - * the connection. - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - /* - * Server resent SHUTDOWN ACK, stay in the same state - */ - {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - /* - * SHUTDOWN COM from client, this what we are expecting, let's close - * the connection - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - /* - * SHUTDOWN COMPLETE from server this should not happen. - */ - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - }, - /* - * State : IP_VS_SCTP_S_CLOSED - */ - {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ }, - {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ }, - {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ }, - {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ } - } +#define sNO IP_VS_SCTP_S_NONE +#define sI1 IP_VS_SCTP_S_INIT1 +#define sIN IP_VS_SCTP_S_INIT +#define sCS IP_VS_SCTP_S_COOKIE_SENT +#define sCR IP_VS_SCTP_S_COOKIE_REPLIED +#define sCW IP_VS_SCTP_S_COOKIE_WAIT +#define sCO IP_VS_SCTP_S_COOKIE +#define sCE IP_VS_SCTP_S_COOKIE_ECHOED +#define sES IP_VS_SCTP_S_ESTABLISHED +#define sSS IP_VS_SCTP_S_SHUTDOWN_SENT +#define sSR IP_VS_SCTP_S_SHUTDOWN_RECEIVED +#define sSA IP_VS_SCTP_S_SHUTDOWN_ACK_SENT +#define sRJ IP_VS_SCTP_S_REJECTED +#define sCL IP_VS_SCTP_S_CLOSED + +static const __u8 sctp_states + [IP_VS_DIR_LAST][IP_VS_SCTP_EVENT_LAST][IP_VS_SCTP_S_LAST] = { + { /* INPUT */ +/* sNO, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL*/ +/* d */{sES, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, 
+/* i */{sI1, sIN, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sIN, sIN}, +/* i_a */{sCW, sCW, sCW, sCS, sCR, sCO, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* c_e */{sCR, sIN, sIN, sCR, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* c_a */{sES, sI1, sIN, sCS, sCR, sCW, sCO, sES, sES, sSS, sSR, sSA, sRJ, sCL}, +/* s */{sSR, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sSR, sSS, sSR, sSA, sRJ, sCL}, +/* s_a */{sCL, sIN, sIN, sCS, sCR, sCW, sCO, sCE, sES, sCL, sSR, sCL, sRJ, sCL}, +/* s_c */{sCL, sCL, sCL, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sCL, sRJ, sCL}, +/* err */{sCL, sI1, sIN, sCS, sCR, sCW, sCO, sCL, sES, sSS, sSR, sSA, sRJ, sCL}, +/* ab */{sCL, sCL, sCL, sCL, sCL, sRJ, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, + }, + { /* OUTPUT */ +/* sNO, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL*/ +/* d */{sES, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* i */{sCW, sCW, sCW, sCW, sCW, sCW, sCW, sCW, sES, sCW, sCW, sCW, sCW, sCW}, +/* i_a */{sCS, sCS, sCS, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* c_e */{sCE, sCE, sCE, sCE, sCE, sCE, sCE, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* c_a */{sES, sES, sES, sES, sES, sES, sES, sES, sES, sSS, sSR, sSA, sRJ, sCL}, +/* s */{sSS, sSS, sSS, sSS, sSS, sSS, sSS, sSS, sSS, sSS, sSR, sSA, sRJ, sCL}, +/* s_a */{sSA, sSA, sSA, sSA, sSA, sCW, sCO, sCE, sES, sSA, sSA, sSA, sRJ, sCL}, +/* s_c */{sCL, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* err */{sCL, sCL, sCL, sCL, sCL, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* ab */{sCL, sRJ, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, + }, + { /* INPUT-ONLY */ +/* sNO, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL*/ +/* d */{sES, sI1, sIN, sCS, sCR, sES, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* i */{sI1, sIN, sIN, sIN, sIN, sIN, sCO, sCE, sES, sSS, sSR, sSA, sIN, sIN}, +/* i_a */{sCE, sCE, sCE, sCE, sCE, sCE, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* c_e */{sES, sES, sES, sES, sES, sES, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* c_a */{sES, sI1, sIN, sES, sES, sCW, sES, sES, sES, sSS, sSR, sSA, sRJ, sCL}, +/* s */{sSR, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sSR, sSS, sSR, sSA, sRJ, sCL}, +/* s_a */{sCL, sIN, sIN, sCS, sCR, sCW, sCO, sCE, sCL, sCL, sSR, sCL, sRJ, sCL}, +/* s_c */{sCL, sCL, sCL, sCL, sCL, sCW, sCO, sCE, sES, sSS, sCL, sCL, sRJ, sCL}, +/* err */{sCL, sI1, sIN, sCS, sCR, sCW, sCO, sCE, sES, sSS, sSR, sSA, sRJ, sCL}, +/* ab */{sCL, sCL, sCL, sCL, sCL, sRJ, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, + }, }; -/* - * Timeout table[state] - */ +#define IP_VS_SCTP_MAX_RTO ((60 + 1) * HZ) + +/* Timeout table[state] */ static const int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = { - [IP_VS_SCTP_S_NONE] = 2 * HZ, - [IP_VS_SCTP_S_INIT_CLI] = 1 * 60 * HZ, - [IP_VS_SCTP_S_INIT_SER] = 1 * 60 * HZ, - [IP_VS_SCTP_S_INIT_ACK_CLI] = 1 * 60 * HZ, - [IP_VS_SCTP_S_INIT_ACK_SER] = 1 * 60 * HZ, - [IP_VS_SCTP_S_ECHO_CLI] = 1 * 60 * HZ, - [IP_VS_SCTP_S_ECHO_SER] = 1 * 60 * HZ, - [IP_VS_SCTP_S_ESTABLISHED] = 15 * 60 * HZ, - [IP_VS_SCTP_S_SHUT_CLI] = 1 * 60 * HZ, - [IP_VS_SCTP_S_SHUT_SER] = 1 * 60 * HZ, - [IP_VS_SCTP_S_SHUT_ACK_CLI] = 1 * 60 * HZ, - [IP_VS_SCTP_S_SHUT_ACK_SER] = 1 * 60 * HZ, - [IP_VS_SCTP_S_CLOSED] = 10 * HZ, - [IP_VS_SCTP_S_LAST] = 2 * HZ, + [IP_VS_SCTP_S_NONE] = 2 * HZ, + [IP_VS_SCTP_S_INIT1] = (0 + 3 + 1) * HZ, + [IP_VS_SCTP_S_INIT] = IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_COOKIE_SENT] = IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_COOKIE_REPLIED] = IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_COOKIE_WAIT] 
= IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_COOKIE] = IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_COOKIE_ECHOED] = IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_ESTABLISHED] = 15 * 60 * HZ, + [IP_VS_SCTP_S_SHUTDOWN_SENT] = IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_SHUTDOWN_RECEIVED] = IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_SHUTDOWN_ACK_SENT] = IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_REJECTED] = (0 + 3 + 1) * HZ, + [IP_VS_SCTP_S_CLOSED] = IP_VS_SCTP_MAX_RTO, + [IP_VS_SCTP_S_LAST] = 2 * HZ, }; static const char *sctp_state_name_table[IP_VS_SCTP_S_LAST + 1] = { - [IP_VS_SCTP_S_NONE] = "NONE", - [IP_VS_SCTP_S_INIT_CLI] = "INIT_CLI", - [IP_VS_SCTP_S_INIT_SER] = "INIT_SER", - [IP_VS_SCTP_S_INIT_ACK_CLI] = "INIT_ACK_CLI", - [IP_VS_SCTP_S_INIT_ACK_SER] = "INIT_ACK_SER", - [IP_VS_SCTP_S_ECHO_CLI] = "COOKIE_ECHO_CLI", - [IP_VS_SCTP_S_ECHO_SER] = "COOKIE_ECHO_SER", - [IP_VS_SCTP_S_ESTABLISHED] = "ESTABISHED", - [IP_VS_SCTP_S_SHUT_CLI] = "SHUTDOWN_CLI", - [IP_VS_SCTP_S_SHUT_SER] = "SHUTDOWN_SER", - [IP_VS_SCTP_S_SHUT_ACK_CLI] = "SHUTDOWN_ACK_CLI", - [IP_VS_SCTP_S_SHUT_ACK_SER] = "SHUTDOWN_ACK_SER", - [IP_VS_SCTP_S_CLOSED] = "CLOSED", - [IP_VS_SCTP_S_LAST] = "BUG!" + [IP_VS_SCTP_S_NONE] = "NONE", + [IP_VS_SCTP_S_INIT1] = "INIT1", + [IP_VS_SCTP_S_INIT] = "INIT", + [IP_VS_SCTP_S_COOKIE_SENT] = "C-SENT", + [IP_VS_SCTP_S_COOKIE_REPLIED] = "C-REPLIED", + [IP_VS_SCTP_S_COOKIE_WAIT] = "C-WAIT", + [IP_VS_SCTP_S_COOKIE] = "COOKIE", + [IP_VS_SCTP_S_COOKIE_ECHOED] = "C-ECHOED", + [IP_VS_SCTP_S_ESTABLISHED] = "ESTABLISHED", + [IP_VS_SCTP_S_SHUTDOWN_SENT] = "S-SENT", + [IP_VS_SCTP_S_SHUTDOWN_RECEIVED] = "S-RECEIVED", + [IP_VS_SCTP_S_SHUTDOWN_ACK_SENT] = "S-ACK-SENT", + [IP_VS_SCTP_S_REJECTED] = "REJECTED", + [IP_VS_SCTP_S_CLOSED] = "CLOSED", + [IP_VS_SCTP_S_LAST] = "BUG!", }; @@ -945,17 +394,20 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, } } - event = sctp_events[chunk_type]; + event = (chunk_type < sizeof(sctp_events)) ? 
+ sctp_events[chunk_type] : IP_VS_SCTP_DATA; - /* - * If the direction is IP_VS_DIR_OUTPUT, this event is from server - */ - if (direction == IP_VS_DIR_OUTPUT) - event++; - /* - * get next state + /* Update direction to INPUT_ONLY if necessary + * or delete NO_OUTPUT flag if output packet detected */ - next_state = sctp_states_table[cp->state][event].next_state; + if (cp->flags & IP_VS_CONN_F_NOOUTPUT) { + if (direction == IP_VS_DIR_OUTPUT) + cp->flags &= ~IP_VS_CONN_F_NOOUTPUT; + else + direction = IP_VS_DIR_INPUT_ONLY; + } + + next_state = sctp_states[direction][event][cp->state]; if (next_state != cp->state) { struct ip_vs_dest *dest = cp->dest; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index f6046d9af8d3..2fc66394d86d 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -461,9 +461,10 @@ static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, } else if (unlikely(cp->protocol == IPPROTO_SCTP)) { if (!((1 << cp->state) & ((1 << IP_VS_SCTP_S_ESTABLISHED) | - (1 << IP_VS_SCTP_S_CLOSED) | - (1 << IP_VS_SCTP_S_SHUT_ACK_CLI) | - (1 << IP_VS_SCTP_S_SHUT_ACK_SER)))) + (1 << IP_VS_SCTP_S_SHUTDOWN_SENT) | + (1 << IP_VS_SCTP_S_SHUTDOWN_RECEIVED) | + (1 << IP_VS_SCTP_S_SHUTDOWN_ACK_SENT) | + (1 << IP_VS_SCTP_S_CLOSED)))) return 0; force = cp->state != cp->old_state; if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED) -- cgit v1.2.3 From eba3b5a78799d21dea05118b294524958f0ab592 Mon Sep 17 00:00:00 2001 From: Alexander Frolkin Date: Wed, 19 Jun 2013 10:54:25 +0100 Subject: ipvs: SH fallback and L4 hashing By default the SH scheduler rejects connections that are hashed onto a realserver of weight 0. This patch adds a flag to make SH choose a different realserver in this case, instead of rejecting the connection. The patch also adds a flag to make SH include the source port (TCP, UDP, SCTP) in the hash as well as the source address. This basically allows for deterministic round-robin load balancing (i.e., where any director in a cluster of directors with identical config will send the same packet the same way). The flags are service flags (IP_VS_SVC_F_SCHED*) so that these options can be set per service. They are set using a new option to ipvsadm. 
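As a rough illustration of the scheduling decision described above (this sketch is not part of the patch and uses made-up names — pick_dest(), dest_usable(), NBUCKETS — rather than kernel symbols), a userspace-style version of the lookup might fold the source port into the hash key and, when the fallback flag is set, probe successive buckets until an available destination is found:

#include <stdint.h>
#include <stddef.h>

#define NBUCKETS 256u			/* must stay a power of two for the mask */

struct dest {
	int weight;
	int overloaded;
};

static int dest_usable(const struct dest *d)
{
	return d && d->weight > 0 && !d->overloaded;
}

static unsigned int hashkey(uint32_t saddr, uint16_t sport, unsigned int offset)
{
	/* same multiplicative constant as the IPVS SH scheduler, port folded in */
	return (offset + (sport + saddr) * 2654435761UL) & (NBUCKETS - 1);
}

static struct dest *pick_dest(struct dest *table[NBUCKETS],
			      uint32_t saddr, uint16_t sport, int fallback)
{
	unsigned int off, tries = fallback ? NBUCKETS : 1;

	for (off = 0; off < tries; off++) {
		struct dest *d = table[hashkey(saddr, sport, off)];

		if (!d)
			break;			/* empty bucket: no server configured */
		if (dest_usable(d))
			return d;		/* weight > 0 and not overloaded */
	}
	return NULL;				/* caller rejects the connection */
}

Note that the probe stops at the first empty bucket, mirroring ip_vs_sh_get_fallback() in the diff below.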
Signed-off-by: Alexander Frolkin Acked-by: Julian Anastasov Signed-off-by: Simon Horman --- include/uapi/linux/ip_vs.h | 6 +++ net/netfilter/ipvs/ip_vs_sh.c | 100 +++++++++++++++++++++++++++++++++++------- 2 files changed, 91 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h index a24537725e80..29458223d044 100644 --- a/include/uapi/linux/ip_vs.h +++ b/include/uapi/linux/ip_vs.h @@ -20,6 +20,12 @@ #define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ #define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ #define IP_VS_SVC_F_ONEPACKET 0x0004 /* one-packet scheduling */ +#define IP_VS_SVC_F_SCHED1 0x0008 /* scheduler flag 1 */ +#define IP_VS_SVC_F_SCHED2 0x0010 /* scheduler flag 2 */ +#define IP_VS_SVC_F_SCHED3 0x0020 /* scheduler flag 3 */ + +#define IP_VS_SVC_F_SCHED_SH_FALLBACK IP_VS_SVC_F_SCHED1 /* SH fallback */ +#define IP_VS_SVC_F_SCHED_SH_PORT IP_VS_SVC_F_SCHED2 /* SH use port */ /* * Destination Server Flags diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index e0d5d1653566..f16c027df15b 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c @@ -48,6 +48,10 @@ #include +#include +#include +#include + /* * IPVS SH bucket @@ -71,10 +75,19 @@ struct ip_vs_sh_state { struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; }; +/* Helper function to determine if server is unavailable */ +static inline bool is_unavailable(struct ip_vs_dest *dest) +{ + return atomic_read(&dest->weight) <= 0 || + dest->flags & IP_VS_DEST_F_OVERLOAD; +} + /* * Returns hash value for IPVS SH entry */ -static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr) +static inline unsigned int +ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr, + __be16 port, unsigned int offset) { __be32 addr_fold = addr->ip; @@ -83,7 +96,8 @@ static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *ad addr_fold = addr->ip6[0]^addr->ip6[1]^ addr->ip6[2]^addr->ip6[3]; #endif - return (ntohl(addr_fold)*2654435761UL) & IP_VS_SH_TAB_MASK; + return (offset + (ntohs(port) + ntohl(addr_fold))*2654435761UL) & + IP_VS_SH_TAB_MASK; } @@ -91,12 +105,42 @@ static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *ad * Get ip_vs_dest associated with supplied parameters. */ static inline struct ip_vs_dest * -ip_vs_sh_get(int af, struct ip_vs_sh_state *s, const union nf_inet_addr *addr) +ip_vs_sh_get(struct ip_vs_service *svc, struct ip_vs_sh_state *s, + const union nf_inet_addr *addr, __be16 port) { - return rcu_dereference(s->buckets[ip_vs_sh_hashkey(af, addr)].dest); + unsigned int hash = ip_vs_sh_hashkey(svc->af, addr, port, 0); + struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); + + return (!dest || is_unavailable(dest)) ? 
NULL : dest; } +/* As ip_vs_sh_get, but with fallback if selected server is unavailable */ +static inline struct ip_vs_dest * +ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s, + const union nf_inet_addr *addr, __be16 port) +{ + unsigned int offset; + unsigned int hash; + struct ip_vs_dest *dest; + + for (offset = 0; offset < IP_VS_SH_TAB_SIZE; offset++) { + hash = ip_vs_sh_hashkey(svc->af, addr, port, offset); + dest = rcu_dereference(s->buckets[hash].dest); + if (!dest) + break; + if (is_unavailable(dest)) + IP_VS_DBG_BUF(6, "SH: selected unavailable server " + "%s:%d (offset %d)", + IP_VS_DBG_ADDR(svc->af, &dest->addr), + ntohs(dest->port), offset); + else + return dest; + } + + return NULL; +} + /* * Assign all the hash buckets of the specified table with the service. */ @@ -213,13 +257,33 @@ static int ip_vs_sh_dest_changed(struct ip_vs_service *svc, } -/* - * If the dest flags is set with IP_VS_DEST_F_OVERLOAD, - * consider that the server is overloaded here. - */ -static inline int is_overloaded(struct ip_vs_dest *dest) +/* Helper function to get port number */ +static inline __be16 +ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph) { - return dest->flags & IP_VS_DEST_F_OVERLOAD; + __be16 port; + struct tcphdr _tcph, *th; + struct udphdr _udph, *uh; + sctp_sctphdr_t _sctph, *sh; + + switch (iph->protocol) { + case IPPROTO_TCP: + th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph); + port = th->source; + break; + case IPPROTO_UDP: + uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph); + port = uh->source; + break; + case IPPROTO_SCTP: + sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph); + port = sh->source; + break; + default: + port = 0; + } + + return port; } @@ -232,15 +296,21 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, { struct ip_vs_dest *dest; struct ip_vs_sh_state *s; + __be16 port = 0; IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n"); + if (svc->flags & IP_VS_SVC_F_SCHED_SH_PORT) + port = ip_vs_sh_get_port(skb, iph); + s = (struct ip_vs_sh_state *) svc->sched_data; - dest = ip_vs_sh_get(svc->af, s, &iph->saddr); - if (!dest - || !(dest->flags & IP_VS_DEST_F_AVAILABLE) - || atomic_read(&dest->weight) <= 0 - || is_overloaded(dest)) { + + if (svc->flags & IP_VS_SVC_F_SCHED_SH_FALLBACK) + dest = ip_vs_sh_get_fallback(svc, s, &iph->saddr, port); + else + dest = ip_vs_sh_get(svc, s, &iph->saddr, port); + + if (!dest) { ip_vs_scheduler_err(svc, "no destination available"); return NULL; } -- cgit v1.2.3 From 4d0c875dcc4923476f364e83912d134da2df224c Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Mon, 24 Jun 2013 22:44:41 +0300 Subject: ipvs: add sync_persist_mode flag Add sync_persist_mode flag to reduce sync traffic by syncing only persistent templates. 
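A minimal sketch of the intended behaviour (illustrative only; conn, controlled_by_template and should_sync are placeholder names, not the kernel's ip_vs structures): with sync_persist_mode set to 1, a connection whose control chain leads to a persistence template is skipped by the sync daemon, while the template itself is still synced.

/* Placeholder types, not the kernel's struct ip_vs_conn. */
struct conn {
	unsigned int flags;		/* CONN_TEMPLATE set for templates */
	struct conn *control;		/* controlling (persistence) template */
};

#define CONN_TEMPLATE 0x0001

static int controlled_by_template(const struct conn *cp)
{
	for (cp = cp->control; cp; cp = cp->control)
		if (cp->flags & CONN_TEMPLATE)
			return 1;
	return 0;
}

/* sync_persist_mode = 1: sync templates, skip the connections they control */
static int should_sync(const struct conn *cp, int sync_persist_mode)
{
	if (cp->flags & CONN_TEMPLATE)
		return 1;		/* template: subject to the usual rate checks */
	if (sync_persist_mode && controlled_by_template(cp))
		return 0;		/* normal conn of a persistent service: skip */
	return 1;			/* otherwise fall through to the state checks */
}

On backup servers this relies on the sloppy_tcp and sloppy_sctp flags so that mid-stream connections can still be created after a failover, as the ipvs-sysctl.txt hunk below notes.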
Signed-off-by: Julian Anastasov Tested-by: Aleksey Chudov Signed-off-by: Simon Horman --- Documentation/networking/ipvs-sysctl.txt | 13 +++++++++++++ include/net/ip_vs.h | 11 +++++++++++ net/netfilter/ipvs/ip_vs_ctl.c | 7 +++++++ net/netfilter/ipvs/ip_vs_sync.c | 12 ++++++++++++ 4 files changed, 43 insertions(+) (limited to 'include') diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt index 9573d0c48c6e..7a3c04729591 100644 --- a/Documentation/networking/ipvs-sysctl.txt +++ b/Documentation/networking/ipvs-sysctl.txt @@ -181,6 +181,19 @@ snat_reroute - BOOLEAN always be the same as the original route so it is an optimisation to disable snat_reroute and avoid the recalculation. +sync_persist_mode - INTEGER + default 0 + + Controls the synchronisation of connections when using persistence + + 0: All types of connections are synchronised + 1: Attempt to reduce the synchronisation traffic depending on + the connection type. For persistent services avoid synchronisation + for normal connections, do it only for persistence templates. + In such case, for TCP and SCTP it may need enabling sloppy_tcp and + sloppy_sctp flags on backup servers. For non-persistent services + such optimization is not applied, mode 0 is assumed. + sync_version - INTEGER default 1 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index e667df171003..f0d70f066f3d 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -975,6 +975,7 @@ struct netns_ipvs { int sysctl_snat_reroute; int sysctl_sync_ver; int sysctl_sync_ports; + int sysctl_sync_persist_mode; unsigned long sysctl_sync_qlen_max; int sysctl_sync_sock_size; int sysctl_cache_bypass; @@ -1076,6 +1077,11 @@ static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) return ACCESS_ONCE(ipvs->sysctl_sync_ports); } +static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_persist_mode; +} + static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs) { return ipvs->sysctl_sync_qlen_max; @@ -1139,6 +1145,11 @@ static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) return 1; } +static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) +{ + return 0; +} + static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs) { return IPVS_SYNC_QLEN_MAX; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index da035fc01eb2..c8148e487386 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1714,6 +1714,12 @@ static struct ctl_table vs_vars[] = { .mode = 0644, .proc_handler = &proc_do_sync_ports, }, + { + .procname = "sync_persist_mode", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "sync_qlen_max", .maxlen = sizeof(unsigned long), @@ -3729,6 +3735,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net) tbl[idx++].data = &ipvs->sysctl_sync_ver; ipvs->sysctl_sync_ports = 1; tbl[idx++].data = &ipvs->sysctl_sync_ports; + tbl[idx++].data = &ipvs->sysctl_sync_persist_mode; ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32; tbl[idx++].data = &ipvs->sysctl_sync_qlen_max; ipvs->sysctl_sync_sock_size = 0; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 2fc66394d86d..f4484719f3e6 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -425,6 +425,16 @@ ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs) return sb; } +/* Check if connection is controlled by 
persistence */ +static inline bool in_persistence(struct ip_vs_conn *cp) +{ + for (cp = cp->control; cp; cp = cp->control) { + if (cp->flags & IP_VS_CONN_F_TEMPLATE) + return true; + } + return false; +} + /* Check if conn should be synced. * pkts: conn packets, use sysctl_sync_threshold to avoid packet check * - (1) sync_refresh_period: reduce sync rate. Additionally, retry @@ -447,6 +457,8 @@ static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, /* Check if we sync in current state */ if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE)) force = 0; + else if (unlikely(sysctl_sync_persist_mode(ipvs) && in_persistence(cp))) + return 0; else if (likely(cp->protocol == IPPROTO_TCP)) { if (!((1 << cp->state) & ((1 << IP_VS_TCP_S_ESTABLISHED) | -- cgit v1.2.3 From 8b4d14d8eb36874daf159d33dcccd4746a6f3189 Mon Sep 17 00:00:00 2001 From: JunweiZhang Date: Wed, 26 Jun 2013 16:40:06 +0800 Subject: netns: exclude ipvs from struct net when IPVS disabled no real problem is fixed, just save a few bytes in net_namespace structure. Signed-off-by: JunweiZhang Signed-off-by: Nicolas Dichtel Reviewed-by: Julian Anastasov Signed-off-by: Simon Horman --- include/net/net_namespace.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 495bc57f292c..84e37b1ca9e1 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -115,7 +115,9 @@ struct net { #ifdef CONFIG_XFRM struct netns_xfrm xfrm; #endif +#if IS_ENABLED(CONFIG_IP_VS) struct netns_ipvs *ipvs; +#endif struct sock *diag_nlsk; atomic_t rt_genid; atomic_t fnhe_genid; -- cgit v1.2.3 From baffd2e8d9a562021f046ed52195aaac91a51412 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 10 Jun 2013 23:12:58 -0300 Subject: ARM: imx: flexcan: Remove platform file As there are no more users of the flexcan platform file, let's remove it. Cc: Sascha Hauer Acked-by: Shawn Guo Signed-off-by: Fabio Estevam Signed-off-by: Marc Kleine-Budde --- include/linux/can/platform/flexcan.h | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 include/linux/can/platform/flexcan.h (limited to 'include') diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h deleted file mode 100644 index 72b713ab57e9..000000000000 --- a/include/linux/can/platform/flexcan.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (C) 2010 Marc Kleine-Budde - * - * This file is released under the GPLv2 - * - */ - -#ifndef __CAN_PLATFORM_FLEXCAN_H -#define __CAN_PLATFORM_FLEXCAN_H - -/** - * struct flexcan_platform_data - flex CAN controller platform data - * @transceiver_enable: - called to power on/off the transceiver - * - */ -struct flexcan_platform_data { - void (*transceiver_switch)(int enable); -}; - -#endif /* __CAN_PLATFORM_FLEXCAN_H */ -- cgit v1.2.3 From 88f9b65d444794bb607f71644362ba0642585206 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 26 Jun 2013 10:02:11 +0200 Subject: bcma: add support for BCM43142 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rafał Miłecki Signed-off-by: John W. 
Linville --- drivers/bcma/bcma_private.h | 2 + drivers/bcma/driver_chipcommon.c | 11 ++- drivers/bcma/driver_chipcommon_pmu.c | 123 ++++++++++++++++++++++++++++ drivers/bcma/host_pci.c | 1 + drivers/bcma/main.c | 19 +++++ drivers/bcma/sprom.c | 1 + include/linux/bcma/bcma.h | 1 + include/linux/bcma/bcma_driver_chipcommon.h | 55 +++++++++++++ 8 files changed, 211 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 79595a001204..0215f9ad755c 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -22,6 +22,8 @@ struct bcma_bus; /* main.c */ +bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, + int timeout); int bcma_bus_register(struct bcma_bus *bus); void bcma_bus_unregister(struct bcma_bus *bus); int __init bcma_bus_early_register(struct bcma_bus *bus, diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c index 036c6744b39b..b068f98920a8 100644 --- a/drivers/bcma/driver_chipcommon.c +++ b/drivers/bcma/driver_chipcommon.c @@ -140,8 +140,15 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc) bcma_core_chipcommon_early_init(cc); if (cc->core->id.rev >= 20) { - bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, 0); - bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, 0); + u32 pullup = 0, pulldown = 0; + + if (cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM43142) { + pullup = 0x402e0; + pulldown = 0x20500; + } + + bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, pullup); + bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, pulldown); } if (cc->capabilities & BCMA_CC_CAP_PMU) diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c index edca73af3cc0..5081a8c439cc 100644 --- a/drivers/bcma/driver_chipcommon_pmu.c +++ b/drivers/bcma/driver_chipcommon_pmu.c @@ -56,6 +56,109 @@ void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, } EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset); +static u32 bcma_pmu_xtalfreq(struct bcma_drv_cc *cc) +{ + u32 ilp_ctl, alp_hz; + + if (!(bcma_cc_read32(cc, BCMA_CC_PMU_STAT) & + BCMA_CC_PMU_STAT_EXT_LPO_AVAIL)) + return 0; + + bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, + BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT)); + usleep_range(1000, 2000); + + ilp_ctl = bcma_cc_read32(cc, BCMA_CC_PMU_XTAL_FREQ); + ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK; + + bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0); + + alp_hz = ilp_ctl * 32768 / 4; + return (alp_hz + 50000) / 100000 * 100; +} + +static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq) +{ + struct bcma_bus *bus = cc->core->bus; + u32 freq_tgt_target = 0, freq_tgt_current; + u32 pll0, mask; + + switch (bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM43142: + /* pmu2_xtaltab0_adfll_485 */ + switch (xtalfreq) { + case 12000: + freq_tgt_target = 0x50D52; + break; + case 20000: + freq_tgt_target = 0x307FE; + break; + case 26000: + freq_tgt_target = 0x254EA; + break; + case 37400: + freq_tgt_target = 0x19EF8; + break; + case 52000: + freq_tgt_target = 0x12A75; + break; + } + break; + } + + if (!freq_tgt_target) { + bcma_err(bus, "Unknown TGT frequency for xtalfreq %d\n", + xtalfreq); + return; + } + + pll0 = bcma_chipco_pll_read(cc, BCMA_CC_PMU15_PLL_PLLCTL0); + freq_tgt_current = (pll0 & BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK) >> + BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT; + + if (freq_tgt_current == freq_tgt_target) { + bcma_debug(bus, "Target TGT frequency already set\n"); + return; + } + + /* Turn off PLL */ + switch (bus->chipinfo.id) { + case 
BCMA_CHIP_ID_BCM43142: + mask = (u32)~(BCMA_RES_4314_HT_AVAIL | + BCMA_RES_4314_MACPHY_CLK_AVAIL); + + bcma_cc_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask); + bcma_cc_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask); + bcma_wait_value(cc->core, BCMA_CLKCTLST, + BCMA_CLKCTLST_HAVEHT, 0, 20000); + break; + } + + pll0 &= ~BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK; + pll0 |= freq_tgt_target << BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT; + bcma_chipco_pll_write(cc, BCMA_CC_PMU15_PLL_PLLCTL0, pll0); + + /* Flush */ + if (cc->pmu.rev >= 2) + bcma_cc_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD); + + /* TODO: Do we need to update OTP? */ +} + +static void bcma_pmu_pll_init(struct bcma_drv_cc *cc) +{ + struct bcma_bus *bus = cc->core->bus; + u32 xtalfreq = bcma_pmu_xtalfreq(cc); + + switch (bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM43142: + if (xtalfreq == 0) + xtalfreq = 20000; + bcma_pmu2_pll_init0(cc, xtalfreq); + break; + } +} + static void bcma_pmu_resources_init(struct bcma_drv_cc *cc) { struct bcma_bus *bus = cc->core->bus; @@ -66,6 +169,25 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc) min_msk = 0x200D; max_msk = 0xFFFF; break; + case BCMA_CHIP_ID_BCM43142: + min_msk = BCMA_RES_4314_LPLDO_PU | + BCMA_RES_4314_PMU_SLEEP_DIS | + BCMA_RES_4314_PMU_BG_PU | + BCMA_RES_4314_CBUCK_LPOM_PU | + BCMA_RES_4314_CBUCK_PFM_PU | + BCMA_RES_4314_CLDO_PU | + BCMA_RES_4314_LPLDO2_LVM | + BCMA_RES_4314_WL_PMU_PU | + BCMA_RES_4314_LDO3P3_PU | + BCMA_RES_4314_OTP_PU | + BCMA_RES_4314_WL_PWRSW_PU | + BCMA_RES_4314_LQ_AVAIL | + BCMA_RES_4314_LOGIC_RET | + BCMA_RES_4314_MEM_SLEEP | + BCMA_RES_4314_MACPHY_RET | + BCMA_RES_4314_WL_CORE_READY; + max_msk = 0x3FFFFFFF; + break; default: bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n", bus->chipinfo.id); @@ -165,6 +287,7 @@ void bcma_pmu_init(struct bcma_drv_cc *cc) bcma_cc_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_NOILPONW); + bcma_pmu_pll_init(cc); bcma_pmu_resources_init(cc); bcma_pmu_workarounds(cc); } diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index fbf2759e7e4e..a355e63a3838 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -275,6 +275,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, { 0, }, }; diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index f72f52b4b1dd..0067422ec17d 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -93,6 +93,25 @@ struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid, return NULL; } +bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, + int timeout) +{ + unsigned long deadline = jiffies + timeout; + u32 val; + + do { + val = bcma_read32(core, reg); + if ((val & mask) == value) + return true; + cpu_relax(); + udelay(10); + } while (!time_after_eq(jiffies, deadline)); + + bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg); + + return false; +} + static void bcma_release_core_dev(struct device *dev) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index de15b4f4b237..72bf4540f565 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -503,6 +503,7 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus) case BCMA_CHIP_ID_BCM4331: present = 
chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT; break; + case BCMA_CHIP_ID_BCM43142: case BCMA_CHIP_ID_BCM43224: case BCMA_CHIP_ID_BCM43225: /* for these chips OTP is always available */ diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 2e34db82a643..622fc505d3e1 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -144,6 +144,7 @@ struct bcma_host_ops { /* Chip IDs of PCIe devices */ #define BCMA_CHIP_ID_BCM4313 0x4313 +#define BCMA_CHIP_ID_BCM43142 43142 #define BCMA_CHIP_ID_BCM43224 43224 #define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8 #define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index b8b09eac60a4..c49e1a159e6e 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -330,6 +330,8 @@ #define BCMA_CC_PMU_CAP 0x0604 /* PMU capabilities */ #define BCMA_CC_PMU_CAP_REVISION 0x000000FF /* Revision mask */ #define BCMA_CC_PMU_STAT 0x0608 /* PMU status */ +#define BCMA_CC_PMU_STAT_EXT_LPO_AVAIL 0x00000100 +#define BCMA_CC_PMU_STAT_WDRESET 0x00000080 #define BCMA_CC_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ #define BCMA_CC_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */ #define BCMA_CC_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ @@ -355,6 +357,11 @@ #define BCMA_CC_REGCTL_DATA 0x065C #define BCMA_CC_PLLCTL_ADDR 0x0660 #define BCMA_CC_PLLCTL_DATA 0x0664 +#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */ +#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */ +#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF +#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_MASK 0x80000000 +#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT 31 #define BCMA_CC_SPROM 0x0800 /* SPROM beginning */ /* NAND flash MLC controller registers (corerev >= 38) */ #define BCMA_CC_NAND_REVISION 0x0C00 @@ -435,6 +442,23 @@ #define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007 #define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_SHIFT 0 +/* PMU rev 15 */ +#define BCMA_CC_PMU15_PLL_PLLCTL0 0 +#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_MASK 0x00000003 +#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_SHIFT 0 +#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC +#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT 2 +#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000 +#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_SHIFT 22 +#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_MASK 0x07000000 +#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_SHIFT 24 +#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000 +#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_SHIFT 27 +#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_MASK 0x40000000 +#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_SHIFT 30 +#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000 +#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_SHIFT 31 + /* ALP clock on pre-PMU chips */ #define BCMA_CC_PMU_ALP_CLOCK 20000000 /* HT clock for systems with PMU-enabled chipcommon */ @@ -507,6 +531,37 @@ #define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE BIT(18) #define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE BIT(19) +#define BCMA_RES_4314_LPLDO_PU BIT(0) +#define BCMA_RES_4314_PMU_SLEEP_DIS BIT(1) +#define BCMA_RES_4314_PMU_BG_PU BIT(2) +#define BCMA_RES_4314_CBUCK_LPOM_PU BIT(3) +#define BCMA_RES_4314_CBUCK_PFM_PU BIT(4) +#define BCMA_RES_4314_CLDO_PU BIT(5) +#define BCMA_RES_4314_LPLDO2_LVM BIT(6) +#define BCMA_RES_4314_WL_PMU_PU BIT(7) +#define BCMA_RES_4314_LNLDO_PU BIT(8) +#define BCMA_RES_4314_LDO3P3_PU BIT(9) +#define BCMA_RES_4314_OTP_PU BIT(10) +#define BCMA_RES_4314_XTAL_PU BIT(11) +#define 
BCMA_RES_4314_WL_PWRSW_PU BIT(12) +#define BCMA_RES_4314_LQ_AVAIL BIT(13) +#define BCMA_RES_4314_LOGIC_RET BIT(14) +#define BCMA_RES_4314_MEM_SLEEP BIT(15) +#define BCMA_RES_4314_MACPHY_RET BIT(16) +#define BCMA_RES_4314_WL_CORE_READY BIT(17) +#define BCMA_RES_4314_ILP_REQ BIT(18) +#define BCMA_RES_4314_ALP_AVAIL BIT(19) +#define BCMA_RES_4314_MISC_PWRSW_PU BIT(20) +#define BCMA_RES_4314_SYNTH_PWRSW_PU BIT(21) +#define BCMA_RES_4314_RX_PWRSW_PU BIT(22) +#define BCMA_RES_4314_RADIO_PU BIT(23) +#define BCMA_RES_4314_VCO_LDO_PU BIT(24) +#define BCMA_RES_4314_AFE_LDO_PU BIT(25) +#define BCMA_RES_4314_RX_LDO_PU BIT(26) +#define BCMA_RES_4314_TX_LDO_PU BIT(27) +#define BCMA_RES_4314_HT_AVAIL BIT(28) +#define BCMA_RES_4314_MACPHY_CLK_AVAIL BIT(29) + /* Data for the PMU, if available. * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) */ -- cgit v1.2.3 From 3b81a6809480f3fc7d9d06562704c8df18ecec00 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Wed, 26 Jun 2013 14:20:18 +0200 Subject: brcmfmac: add broken scatter-gather DMA support DMA engine of some old SDIO host controllers require block size alignment for data length of each scatterlist item. This patch introduces an intermediate buffer list to support this kind of platform. It decreases the throughput because of an extra memcpy in critical data path. So don't turn this on unless it's necessary. Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Arend van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville --- drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 78 ++++++++++++++++++++---- include/linux/platform_data/brcmfmac-sdio.h | 5 ++ 2 files changed, 72 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 70cd0e9cd52b..e3f3c48f86d4 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -331,10 +331,11 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, bool write, u32 addr, struct sk_buff_head *pktlist) { unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset; - unsigned int max_blks, max_req_sz; + unsigned int max_blks, max_req_sz, orig_offset, dst_offset; unsigned short max_seg_sz, seg_sz; - unsigned char *pkt_data; - struct sk_buff *pkt_next = NULL; + unsigned char *pkt_data, *orig_data, *dst_data; + struct sk_buff *pkt_next = NULL, *local_pkt_next; + struct sk_buff_head local_list, *target_list; struct mmc_request mmc_req; struct mmc_command mmc_cmd; struct mmc_data mmc_dat; @@ -371,6 +372,32 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, req_sz); } + target_list = pktlist; + /* for host with broken sg support, prepare a page aligned list */ + __skb_queue_head_init(&local_list); + if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) { + req_sz = 0; + skb_queue_walk(pktlist, pkt_next) + req_sz += pkt_next->len; + req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize); + while (req_sz > PAGE_SIZE) { + pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE); + if (pkt_next == NULL) { + ret = -ENOMEM; + goto exit; + } + __skb_queue_tail(&local_list, pkt_next); + req_sz -= PAGE_SIZE; + } + pkt_next = brcmu_pkt_buf_get_skb(req_sz); + if (pkt_next == NULL) { + ret = -ENOMEM; + goto exit; + } + __skb_queue_tail(&local_list, pkt_next); + target_list = &local_list; + } + host = sdiodev->func[fn]->card->host; func_blk_sz = 
sdiodev->func[fn]->cur_blksize; /* Blocks per command is limited by host count, host transfer @@ -380,13 +407,15 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, max_req_sz = min_t(unsigned int, host->max_req_size, max_blks * func_blk_sz); max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC); - max_seg_sz = min_t(unsigned short, max_seg_sz, pktlist->qlen); - seg_sz = pktlist->qlen; + max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen); + seg_sz = target_list->qlen; pkt_offset = 0; - pkt_next = pktlist->next; + pkt_next = target_list->next; - if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) - return -ENOMEM; + if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) { + ret = -ENOMEM; + goto exit; + } while (seg_sz) { req_sz = 0; @@ -396,7 +425,7 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, memset(&mmc_dat, 0, sizeof(struct mmc_data)); sgl = st.sgl; /* prep sg table */ - while (pkt_next != (struct sk_buff *)pktlist) { + while (pkt_next != (struct sk_buff *)target_list) { pkt_data = pkt_next->data + pkt_offset; sg_data_sz = pkt_next->len - pkt_offset; if (sg_data_sz > host->max_seg_size) @@ -423,8 +452,8 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, if (req_sz % func_blk_sz != 0) { brcmf_err("sg request length %u is not %u aligned\n", req_sz, func_blk_sz); - sg_free_table(&st); - return -ENOTBLK; + ret = -ENOTBLK; + goto exit; } mmc_dat.sg = st.sgl; mmc_dat.sg_len = sg_cnt; @@ -457,7 +486,34 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, } } + if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) { + local_pkt_next = local_list.next; + orig_offset = 0; + skb_queue_walk(pktlist, pkt_next) { + dst_offset = 0; + do { + req_sz = local_pkt_next->len - orig_offset; + req_sz = min_t(uint, pkt_next->len - dst_offset, + req_sz); + orig_data = local_pkt_next->data + orig_offset; + dst_data = pkt_next->data + dst_offset; + memcpy(dst_data, orig_data, req_sz); + orig_offset += req_sz; + dst_offset += req_sz; + if (orig_offset == local_pkt_next->len) { + orig_offset = 0; + local_pkt_next = local_pkt_next->next; + } + if (dst_offset == pkt_next->len) + break; + } while (!skb_queue_empty(&local_list)); + } + } + +exit: sg_free_table(&st); + while ((pkt_next = __skb_dequeue(&local_list)) != NULL) + brcmu_pkt_buf_free_skb(pkt_next); return ret; } diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h index 1ade657d5fc1..b7174998c24a 100644 --- a/include/linux/platform_data/brcmfmac-sdio.h +++ b/include/linux/platform_data/brcmfmac-sdio.h @@ -90,6 +90,10 @@ void __init brcmfmac_init_pdata(void) * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are * used for registering the irq using request_irq function. * + * broken_sg_support: flag for broken sg list support of SDIO host controller. + * Set this to true if the SDIO host controller has higher align requirement + * than 32 bytes for each scatterlist item. + * * power_on: This function is called by the brcmfmac when the module gets * loaded. This can be particularly useful for low power devices. The platform * spcific routine may for example decide to power up the complete device. 
@@ -116,6 +120,7 @@ struct brcmfmac_sdio_platform_data { bool oob_irq_supported; unsigned int oob_irq_nr; unsigned long oob_irq_flags; + bool broken_sg_support; void (*power_on)(void); void (*power_off)(void); void (*reset)(void); -- cgit v1.2.3 From 621e84d6f373dcb273ebfd772638b8e7dc3c2c48 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Wed, 26 Jun 2013 16:11:27 +0200 Subject: dev: introduce skb_scrub_packet() The goal of this new function is to perform all needed cleanup before sending an skb into another netns. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- include/linux/skbuff.h | 1 + net/core/dev.c | 11 +---------- net/core/skbuff.c | 23 +++++++++++++++++++++++ 3 files changed, 25 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a7393adea0b5..6b06023e8a08 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2384,6 +2384,7 @@ extern void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); +extern void skb_scrub_packet(struct sk_buff *skb); extern struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); diff --git a/net/core/dev.c b/net/core/dev.c index 722f633926e0..370354a9c5f6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1652,22 +1652,13 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) } } - skb_orphan(skb); - if (unlikely(!is_skb_forwardable(dev, skb))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; } - skb->skb_iif = 0; - skb_dst_drop(skb); - skb->tstamp.tv64 = 0; - skb->pkt_type = PACKET_HOST; + skb_scrub_packet(skb); skb->protocol = eth_type_trans(skb, dev); - skb->mark = 0; - secpath_reset(skb); - nf_reset(skb); - nf_reset_trace(skb); return netif_rx(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9f73eca29fbe..b1fcb8727e56 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3492,3 +3492,26 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, return true; } EXPORT_SYMBOL(skb_try_coalesce); + +/** + * skb_scrub_packet - scrub an skb before sending it to another netns + * + * @skb: buffer to clean + * + * skb_scrub_packet can be used to clean an skb before injecting it in + * another namespace. We have to clear all information in the skb that + * could impact namespace isolation. + */ +void skb_scrub_packet(struct sk_buff *skb) +{ + skb_orphan(skb); + skb->tstamp.tv64 = 0; + skb->pkt_type = PACKET_HOST; + skb->skb_iif = 0; + skb_dst_drop(skb); + skb->mark = 0; + secpath_reset(skb); + nf_reset(skb); + nf_reset_trace(skb); +} +EXPORT_SYMBOL_GPL(skb_scrub_packet); -- cgit v1.2.3 From 5e6700b3bf98fe98d630bf9c939ad4c85ce95592 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Wed, 26 Jun 2013 16:11:28 +0200 Subject: sit: add support of x-netns This patch allows to switch the netns when packet is encapsulated or decapsulated. In other word, the encapsulated packet is received in a netns, where the lookup is done to find the tunnel. Once the tunnel is found, the packet is decapsulated and injecting into the corresponding interface which stands to another netns. When one of the two netns is removed, the tunnel is destroyed. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- include/net/ip_tunnels.h | 1 + net/ipv4/ip_tunnel.c | 10 +++++++++- net/ipv6/sit.c | 42 ++++++++++++++++++++++++++++++++---------- 3 files changed, 42 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index b0d982471a5c..781b3cf86a2f 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -42,6 +42,7 @@ struct ip_tunnel { struct ip_tunnel __rcu *next; struct hlist_node hash_node; struct net_device *dev; + struct net *net; /* netns for packet i/o */ int err_count; /* Number of arrived ICMP errors */ unsigned long err_time; /* Time when the last ICMP error diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 3b00d81c8f1e..394cebc96d22 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -304,6 +304,7 @@ static struct net_device *__ip_tunnel_create(struct net *net, tunnel = netdev_priv(dev); tunnel->parms = *parms; + tunnel->net = net; err = register_netdevice(dev); if (err) @@ -453,6 +454,9 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, tstats->rx_bytes += skb->len; u64_stats_update_end(&tstats->syncp); + if (tunnel->net != dev_net(tunnel->dev)) + skb_scrub_packet(skb); + if (tunnel->dev->type == ARPHRD_ETHER) { skb->protocol = eth_type_trans(skb, tunnel->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); @@ -541,7 +545,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph); } - rt = ip_route_output_tunnel(dev_net(dev), &fl4, + rt = ip_route_output_tunnel(tunnel->net, &fl4, protocol, dst, tnl_params->saddr, tunnel->parms.o_key, @@ -602,6 +606,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } #endif + if (tunnel->net != dev_net(dev)) + skb_scrub_packet(skb); + if (tunnel->err_count > 0) { if (time_before(jiffies, tunnel->err_time + IPTUNNEL_ERR_TIMEO)) { @@ -888,6 +895,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], if (ip_tunnel_find(itn, p, dev->type)) return -EEXIST; + nt->net = net; nt->parms = *p; err = register_netdevice(dev); if (err) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index f639866b3dcf..97a0bfe2c293 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -466,14 +466,14 @@ isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t) static void ipip6_tunnel_uninit(struct net_device *dev) { - struct net *net = dev_net(dev); - struct sit_net *sitn = net_generic(net, sit_net_id); + struct ip_tunnel *tunnel = netdev_priv(dev); + struct sit_net *sitn = net_generic(tunnel->net, sit_net_id); if (dev == sitn->fb_tunnel_dev) { RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL); } else { - ipip6_tunnel_unlink(sitn, netdev_priv(dev)); - ipip6_tunnel_del_prl(netdev_priv(dev), NULL); + ipip6_tunnel_unlink(sitn, tunnel); + ipip6_tunnel_del_prl(tunnel, NULL); } dev_put(dev); } @@ -621,6 +621,8 @@ static int ipip6_rcv(struct sk_buff *skb) tstats->rx_packets++; tstats->rx_bytes += skb->len; + if (tunnel->net != dev_net(tunnel->dev)) + skb_scrub_packet(skb); netif_rx(skb); return 0; @@ -803,7 +805,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, goto tx_error; } - rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, + rt = ip_route_output_ports(tunnel->net, &fl4, NULL, dst, tiph->saddr, 0, 0, IPPROTO_IPV6, RT_TOS(tos), @@ -858,6 +860,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, tunnel->err_count = 0; } + if (tunnel->net != dev_net(dev)) + skb_scrub_packet(skb); + /* * Okay, now see if we can 
stuff it in the buffer as-is. */ @@ -944,7 +949,8 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) iph = &tunnel->parms.iph; if (iph->daddr) { - struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, + struct rtable *rt = ip_route_output_ports(tunnel->net, &fl4, + NULL, iph->daddr, iph->saddr, 0, 0, IPPROTO_IPV6, @@ -959,7 +965,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) } if (!tdev && tunnel->parms.link) - tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link); + tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link); if (tdev) { dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); @@ -972,7 +978,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) { - struct net *net = dev_net(t->dev); + struct net *net = t->net; struct sit_net *sitn = net_generic(net, sit_net_id); ipip6_tunnel_unlink(sitn, t); @@ -1248,7 +1254,6 @@ static void ipip6_tunnel_setup(struct net_device *dev) dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; dev->iflink = 0; dev->addr_len = 4; - dev->features |= NETIF_F_NETNS_LOCAL; dev->features |= NETIF_F_LLTX; } @@ -1257,6 +1262,7 @@ static int ipip6_tunnel_init(struct net_device *dev) struct ip_tunnel *tunnel = netdev_priv(dev); tunnel->dev = dev; + tunnel->net = dev_net(dev); memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); @@ -1277,6 +1283,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) struct sit_net *sitn = net_generic(net, sit_net_id); tunnel->dev = dev; + tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name); iph->version = 4; @@ -1564,8 +1571,14 @@ static struct xfrm_tunnel ipip_handler __read_mostly = { static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) { + struct net *net = dev_net(sitn->fb_tunnel_dev); + struct net_device *dev, *aux; int prio; + for_each_netdev_safe(net, dev, aux) + if (dev->rtnl_link_ops == &sit_link_ops) + unregister_netdevice_queue(dev, head); + for (prio = 1; prio < 4; prio++) { int h; for (h = 0; h < HASH_SIZE; h++) { @@ -1573,7 +1586,12 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea t = rtnl_dereference(sitn->tunnels[prio][h]); while (t != NULL) { - unregister_netdevice_queue(t->dev, head); + /* If dev is in the same netns, it has already + * been added to the list by the previous loop. + */ + if (dev_net(t->dev) != net) + unregister_netdevice_queue(t->dev, + head); t = rtnl_dereference(t->next); } } @@ -1598,6 +1616,10 @@ static int __net_init sit_init_net(struct net *net) goto err_alloc_dev; } dev_net_set(sitn->fb_tunnel_dev, net); + /* FB netdevice is special: we have one, and only one per netns. + * Allowing to move it to another netns is clearly unsafe. + */ + sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev); if (err) -- cgit v1.2.3 From 3a36515f729458c8efa0c124c7262d5843ad5c37 Mon Sep 17 00:00:00 2001 From: Pablo Neira Date: Fri, 28 Jun 2013 03:04:23 +0200 Subject: netlink: fix splat in skb_clone with large messages Since (c05cdb1 netlink: allow large data transfers from user-space), netlink splats if it invokes skb_clone on large netlink skbs since: * skb_shared_info was not correctly initialized. * skb->destructor is not set in the cloned skb. 
This was spotted by trinity: [ 894.990671] BUG: unable to handle kernel paging request at ffffc9000047b001 [ 894.991034] IP: [] skb_clone+0x24/0xc0 [...] [ 894.991034] Call Trace: [ 894.991034] [] nl_fib_input+0x6a/0x240 [ 894.991034] [] ? _raw_read_unlock+0x26/0x40 [ 894.991034] [] netlink_unicast+0x169/0x1e0 [ 894.991034] [] netlink_sendmsg+0x251/0x3d0 Fix it by: 1) introducing a new netlink_skb_clone function that is used in nl_fib_input, that sets our special skb->destructor in the cloned skb. Moreover, handle the release of the large cloned skb head area in the destructor path. 2) not allowing large skbuffs in the netlink broadcast path. I cannot find any reasonable use of the large data transfer using netlink in that path, moreover this helps to skip extra skb_clone handling. I found two more netlink clients that are cloning the skbs, but they are not in the sendmsg path. Therefore, the sole client cloning that I found seems to be the fib frontend. Thanks to Eric Dumazet for helping to address this issue. Reported-by: Fengguang Wu Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- include/linux/netlink.h | 16 ++++++++++++++++ net/ipv4/fib_frontend.c | 2 +- net/netlink/af_netlink.c | 35 ++++++++++++++++++----------------- 3 files changed, 35 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 86fde81ac2e6..7a6c396a263b 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -85,6 +85,22 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, void netlink_detachskb(struct sock *sk, struct sk_buff *skb); int netlink_sendskb(struct sock *sk, struct sk_buff *skb); +static inline struct sk_buff * +netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) +{ + struct sk_buff *nskb; + + nskb = skb_clone(skb, gfp_mask); + if (!nskb) + return NULL; + + /* This is a large skb, set destructor callback to release head */ + if (is_vmalloc_addr(skb->head)) + nskb->destructor = skb->destructor; + + return nskb; +} + /* * skb should fit one page. This choice is good for headerless malloc. 
* But we should limit to 8K so that userspace does not have to diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 05a4888dede9..b3f627ac4ed8 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -961,7 +961,7 @@ static void nl_fib_input(struct sk_buff *skb) nlmsg_len(nlh) < sizeof(*frn)) return; - skb = skb_clone(skb, GFP_KERNEL); + skb = netlink_skb_clone(skb, GFP_KERNEL); if (skb == NULL) return; nlh = nlmsg_hdr(skb); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 6967fbcca6c5..0c61b59175dc 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -849,7 +849,10 @@ static void netlink_skb_destructor(struct sk_buff *skb) } #endif if (is_vmalloc_addr(skb->head)) { - vfree(skb->head); + if (!skb->cloned || + !atomic_dec_return(&(skb_shinfo(skb)->dataref))) + vfree(skb->head); + skb->head = NULL; } if (skb->sk != NULL) @@ -1532,33 +1535,31 @@ struct sock *netlink_getsockbyfilp(struct file *filp) return sock; } -static struct sk_buff *netlink_alloc_large_skb(unsigned int size) +static struct sk_buff *netlink_alloc_large_skb(unsigned int size, + int broadcast) { struct sk_buff *skb; void *data; - if (size <= NLMSG_GOODSIZE) + if (size <= NLMSG_GOODSIZE || broadcast) return alloc_skb(size, GFP_KERNEL); - skb = alloc_skb_head(GFP_KERNEL); - if (skb == NULL) - return NULL; + size = SKB_DATA_ALIGN(size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); data = vmalloc(size); if (data == NULL) - goto err; + return NULL; - skb->head = data; - skb->data = data; - skb_reset_tail_pointer(skb); - skb->end = skb->tail + size; - skb->len = 0; - skb->destructor = netlink_skb_destructor; + skb = build_skb(data, size); + if (skb == NULL) + vfree(data); + else { + skb->head_frag = 0; + skb->destructor = netlink_skb_destructor; + } return skb; -err: - kfree_skb(skb); - return NULL; } /* @@ -2244,7 +2245,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, if (len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; - skb = netlink_alloc_large_skb(len); + skb = netlink_alloc_large_skb(len, dst_group); if (skb == NULL) goto out; -- cgit v1.2.3 From 1ec047eb4751e331bc61cff0e98f0db67db8b8dc Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Thu, 27 Jun 2013 00:06:56 +0200 Subject: ipv6: introduce per-interface counter for dad-completed ipv6 addresses To reduce the number of unnecessary router solicitations, MLDv2 and IGMPv3 messages we need to track the number of valid (as in non-optimistic, no-dad-failed and non-tentative) link-local addresses. Therefore, this patch implements a valid_ll_addr_cnt in struct inet6_dev. We now only emit router solicitations if the first link-local address finishes duplicate address detection. The changes for MLDv2 and IGMPv3 are in a follow-up patch. While there, also simplify one if statement(one minor nit I made in one of my previous patches): if (!...) do(); else return; <> if (...) return; do(); Cc: Flavio Leitner Cc: YOSHIFUJI Hideaki Cc: David Stevens Suggested-by: David Stevens Signed-off-by: Hannes Frederic Sowa Acked-by: Flavio Leitner Signed-off-by: David S. 
Miller --- include/net/if_inet6.h | 1 + net/ipv6/addrconf.c | 39 +++++++++++++++++++++++++++++++-------- 2 files changed, 32 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index e4c5a2d2ba34..1628b8f5fb26 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -166,6 +166,7 @@ struct inet6_dev { struct net_device *dev; struct list_head addr_list; + int valid_ll_addr_cnt; struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_tomb; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 4e4cc1fc26d1..20d92ff2d690 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3277,6 +3277,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) { struct net_device *dev = ifp->idev->dev; struct in6_addr lladdr; + bool send_rs; addrconf_del_dad_timer(ifp); @@ -3290,20 +3291,25 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) router advertisements, start sending router solicitations. */ - if (ipv6_accept_ra(ifp->idev) && - ifp->idev->cnf.rtr_solicits > 0 && - (dev->flags&IFF_LOOPBACK) == 0 && - (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) { + read_lock_bh(&ifp->idev->lock); + spin_lock(&ifp->lock); + send_rs = ipv6_accept_ra(ifp->idev) && + ifp->idev->cnf.rtr_solicits > 0 && + (dev->flags&IFF_LOOPBACK) == 0 && + ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL && + ifp->idev->valid_ll_addr_cnt == 1; + spin_unlock(&ifp->lock); + read_unlock_bh(&ifp->idev->lock); + + if (send_rs) { /* * If a host as already performed a random delay * [...] as part of DAD [...] there is no need * to delay again before sending the first RS */ - if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) - ndisc_send_rs(dev, &lladdr, - &in6addr_linklocal_allrouters); - else + if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) return; + ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters); write_lock_bh(&ifp->idev->lock); spin_lock(&ifp->lock); @@ -4576,6 +4582,19 @@ errout: rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err); } +static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count) +{ + write_lock_bh(&ifp->idev->lock); + spin_lock(&ifp->lock); + if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC| + IFA_F_DADFAILED)) == IFA_F_PERMANENT) && + (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) + ifp->idev->valid_ll_addr_cnt += count; + WARN_ON(ifp->idev->valid_ll_addr_cnt < 0); + spin_unlock(&ifp->lock); + write_unlock_bh(&ifp->idev->lock); +} + static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) { struct net *net = dev_net(ifp->idev->dev); @@ -4584,6 +4603,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) switch (event) { case RTM_NEWADDR: + update_valid_ll_addr_cnt(ifp, 1); + /* * If the address was optimistic * we inserted the route at the start of @@ -4599,6 +4620,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) ifp->idev->dev, 0, 0); break; case RTM_DELADDR: + update_valid_ll_addr_cnt(ifp, -1); + if (ifp->idev->cnf.forwarding) addrconf_leave_anycast(ifp); addrconf_leave_solict(ifp->idev, &ifp->addr); -- cgit v1.2.3 From b173ee488dcc545e77ed482158a2f0d06d7a5860 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Thu, 27 Jun 2013 00:07:01 +0200 Subject: ipv6: resend MLD report if a link-local address completes DAD RFC3590/RFC3810 specifies we should resend MLD reports as soon as a valid link-local address is available. We now use the valid_ll_addr_cnt to check if it is necessary to resend a new report. 
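For illustration only (placeholder state and helper names, not the symbols added in net/ipv6/mcast.c): once the first link-local address completes DAD, one report is sent immediately and the remaining mc_qrv-1 resends are scheduled from a timer, roughly as below.

struct mld_state {
	unsigned char qrv;		/* query robustness variable (mc_qrv) */
	unsigned char dad_count;	/* resends still pending (mc_dad_count) */
	unsigned long maxdelay;		/* delay between resends (mc_maxdelay) */
};

static void resend_report(struct mld_state *st)
{
	(void)st;			/* would rebuild and send the MLD report */
}

static void arm_dad_timer(struct mld_state *st, unsigned long delay)
{
	(void)st;
	(void)delay;			/* would start the per-device dad timer */
}

/* Called when the first link-local address finishes DAD. */
static void dad_complete_resend(struct mld_state *st)
{
	st->dad_count = st->qrv;	/* send qrv reports in total */
	if (!st->dad_count)
		return;
	resend_report(st);		/* first report goes out immediately */
	if (--st->dad_count)
		arm_dad_timer(st, st->maxdelay);	/* rest via timer expiry */
}

The timer handler then repeats the same decrement-and-rearm step until the count reaches zero, as mld_dad_timer_expire() does in the diff below.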
Changes since Flavio Leitner's version: a) adapt for valid_ll_addr_cnt b) resend first reports directly in the path and just arm the timer for mc_qrv-1 resends. Reported-by: Flavio Leitner Cc: Hideaki YOSHIFUJI Cc: David Stevens Signed-off-by: Hannes Frederic Sowa Signed-off-by: Flavio Leitner Signed-off-by: David S. Miller --- include/net/addrconf.h | 1 + include/net/if_inet6.h | 2 ++ net/ipv6/addrconf.c | 17 ++++++++++++----- net/ipv6/mcast.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 67 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 21f702704f24..f68eaf574d7e 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -155,6 +155,7 @@ extern bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, const struct in6_addr *src_addr); +extern void ipv6_mc_dad_complete(struct inet6_dev *idev); /* * identify MLD packets for MLD filter exceptions */ diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 1628b8f5fb26..736b5fb95474 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -174,10 +174,12 @@ struct inet6_dev { unsigned char mc_qrv; unsigned char mc_gq_running; unsigned char mc_ifc_count; + unsigned char mc_dad_count; unsigned long mc_v1_seen; unsigned long mc_maxdelay; struct timer_list mc_gq_timer; /* general query timer */ struct timer_list mc_ifc_timer; /* interface change timer */ + struct timer_list mc_dad_timer; /* dad complete mc timer */ struct ifacaddr6 *ac_list; rwlock_t lock; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 20d92ff2d690..12dd2fec045c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3277,7 +3277,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) { struct net_device *dev = ifp->idev->dev; struct in6_addr lladdr; - bool send_rs; + bool send_rs, send_mld; addrconf_del_dad_timer(ifp); @@ -3293,14 +3293,21 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) read_lock_bh(&ifp->idev->lock); spin_lock(&ifp->lock); - send_rs = ipv6_accept_ra(ifp->idev) && + send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL && + ifp->idev->valid_ll_addr_cnt == 1; + send_rs = send_mld && + ipv6_accept_ra(ifp->idev) && ifp->idev->cnf.rtr_solicits > 0 && - (dev->flags&IFF_LOOPBACK) == 0 && - ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL && - ifp->idev->valid_ll_addr_cnt == 1; + (dev->flags&IFF_LOOPBACK) == 0; spin_unlock(&ifp->lock); read_unlock_bh(&ifp->idev->lock); + /* While dad is in progress mld report's source address is in6_addrany. + * Resend with proper ll now. 
+ */ + if (send_mld) + ipv6_mc_dad_complete(ifp->idev); + if (send_rs) { /* * If a host as already performed a random delay diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 72c8bfe06bb4..502c877cbf10 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -999,6 +999,14 @@ static void mld_ifc_start_timer(struct inet6_dev *idev, int delay) in6_dev_hold(idev); } +static void mld_dad_start_timer(struct inet6_dev *idev, int delay) +{ + int tv = net_random() % delay; + + if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2)) + in6_dev_hold(idev); +} + /* * IGMP handling (alias multicast ICMPv6 messages) */ @@ -1815,6 +1823,46 @@ err_out: goto out; } +static void mld_resend_report(struct inet6_dev *idev) +{ + if (MLD_V1_SEEN(idev)) { + struct ifmcaddr6 *mcaddr; + read_lock_bh(&idev->lock); + for (mcaddr = idev->mc_list; mcaddr; mcaddr = mcaddr->next) { + if (!(mcaddr->mca_flags & MAF_NOREPORT)) + igmp6_send(&mcaddr->mca_addr, idev->dev, + ICMPV6_MGM_REPORT); + } + read_unlock_bh(&idev->lock); + } else { + mld_send_report(idev, NULL); + } +} + +void ipv6_mc_dad_complete(struct inet6_dev *idev) +{ + idev->mc_dad_count = idev->mc_qrv; + if (idev->mc_dad_count) { + mld_resend_report(idev); + idev->mc_dad_count--; + if (idev->mc_dad_count) + mld_dad_start_timer(idev, idev->mc_maxdelay); + } +} + +static void mld_dad_timer_expire(unsigned long data) +{ + struct inet6_dev *idev = (struct inet6_dev *)data; + + mld_resend_report(idev); + if (idev->mc_dad_count) { + idev->mc_dad_count--; + if (idev->mc_dad_count) + mld_dad_start_timer(idev, idev->mc_maxdelay); + } + __in6_dev_put(idev); +} + static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, const struct in6_addr *psfsrc) { @@ -2232,6 +2280,8 @@ void ipv6_mc_down(struct inet6_dev *idev) idev->mc_gq_running = 0; if (del_timer(&idev->mc_gq_timer)) __in6_dev_put(idev); + if (del_timer(&idev->mc_dad_timer)) + __in6_dev_put(idev); for (i = idev->mc_list; i; i=i->next) igmp6_group_dropped(i); @@ -2268,6 +2318,8 @@ void ipv6_mc_init_dev(struct inet6_dev *idev) idev->mc_ifc_count = 0; setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire, (unsigned long)idev); + setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire, + (unsigned long)idev); idev->mc_qrv = MLD_QRV_DEFAULT; idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL; idev->mc_v1_seen = 0; -- cgit v1.2.3 From 2ffae99d1fac272952b5a395759823717760ce37 Mon Sep 17 00:00:00 2001 From: Timo Teräs Date: Thu, 27 Jun 2013 10:27:05 +0300 Subject: ipv4: use next hop exceptions also for input routes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit d2d68ba9 (ipv4: Cache input routes in fib_info nexthops) assumed that "locally destined, and routed packets, never trigger PMTU events or redirects that will be processed by us". However, it seems that tunnel devices do trigger PMTU events in certain cases. At least ip_gre, ip6_gre, sit, and ipip do use the inner flow's skb_dst(skb)->ops->update_pmtu to propagate mtu information from the outer flows. These can cause the inner flow mtu to be decreased. If next hop exceptions are not consulted for pmtu, IP fragmentation will not be done properly for these routes. It also seems that we really need to always have the PMTU information available for the netfilter TCPMSS clamp-to-pmtu feature to work properly. So for the time being, cache separate copies of input routes for each next hop exception. Signed-off-by: Timo Teräs Reviewed-by: Julian Anastasov Signed-off-by: David S. 
Miller --- include/net/ip_fib.h | 3 ++- net/ipv4/fib_semantics.c | 3 ++- net/ipv4/route.c | 65 +++++++++++++++++++++++++++++++++++++----------- 3 files changed, 54 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 44424e9dab2a..aac85534d7b5 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -56,7 +56,8 @@ struct fib_nh_exception { u32 fnhe_pmtu; __be32 fnhe_gw; unsigned long fnhe_expires; - struct rtable __rcu *fnhe_rth; + struct rtable __rcu *fnhe_rth_input; + struct rtable __rcu *fnhe_rth_output; unsigned long fnhe_stamp; }; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 8f6cb7a87cd6..d5dbca5ecf62 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -169,7 +169,8 @@ static void free_nh_exceptions(struct fib_nh *nh) next = rcu_dereference_protected(fnhe->fnhe_next, 1); - rt_fibinfo_free(&fnhe->fnhe_rth); + rt_fibinfo_free(&fnhe->fnhe_rth_input); + rt_fibinfo_free(&fnhe->fnhe_rth_output); kfree(fnhe); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f3fa42eac461..a9a54a236832 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -565,10 +565,25 @@ static inline void rt_free(struct rtable *rt) static DEFINE_SPINLOCK(fnhe_lock); +static void fnhe_flush_routes(struct fib_nh_exception *fnhe) +{ + struct rtable *rt; + + rt = rcu_dereference(fnhe->fnhe_rth_input); + if (rt) { + RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL); + rt_free(rt); + } + rt = rcu_dereference(fnhe->fnhe_rth_output); + if (rt) { + RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL); + rt_free(rt); + } +} + static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) { struct fib_nh_exception *fnhe, *oldest; - struct rtable *orig; oldest = rcu_dereference(hash->chain); for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe; @@ -576,11 +591,7 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) oldest = fnhe; } - orig = rcu_dereference(oldest->fnhe_rth); - if (orig) { - RCU_INIT_POINTER(oldest->fnhe_rth, NULL); - rt_free(orig); - } + fnhe_flush_routes(oldest); return oldest; } @@ -644,7 +655,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, fnhe->fnhe_expires = max(1UL, expires); } /* Update all cached dsts too */ - rt = rcu_dereference(fnhe->fnhe_rth); + rt = rcu_dereference(fnhe->fnhe_rth_input); + if (rt) + fill_route_from_fnhe(rt, fnhe); + rt = rcu_dereference(fnhe->fnhe_rth_output); if (rt) fill_route_from_fnhe(rt, fnhe); } else { @@ -668,6 +682,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, * stale, so anyone caching it rechecks if this exception * applies to them. 
*/ + rt = rcu_dereference(nh->nh_rth_input); + if (rt) + rt->dst.obsolete = DST_OBSOLETE_KILL; + for_each_possible_cpu(i) { struct rtable __rcu **prt; prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i); @@ -1242,25 +1260,36 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, spin_lock_bh(&fnhe_lock); if (daddr == fnhe->fnhe_daddr) { + struct rtable __rcu **porig; + struct rtable *orig; int genid = fnhe_genid(dev_net(rt->dst.dev)); - struct rtable *orig = rcu_dereference(fnhe->fnhe_rth); + + if (rt_is_input_route(rt)) + porig = &fnhe->fnhe_rth_input; + else + porig = &fnhe->fnhe_rth_output; + orig = rcu_dereference(*porig); if (fnhe->fnhe_genid != genid) { fnhe->fnhe_genid = genid; fnhe->fnhe_gw = 0; fnhe->fnhe_pmtu = 0; fnhe->fnhe_expires = 0; + fnhe_flush_routes(fnhe); + orig = NULL; } fill_route_from_fnhe(rt, fnhe); if (!rt->rt_gateway) rt->rt_gateway = daddr; - rcu_assign_pointer(fnhe->fnhe_rth, rt); - if (orig) - rt_free(orig); + if (!(rt->dst.flags & DST_NOCACHE)) { + rcu_assign_pointer(*porig, rt); + if (orig) + rt_free(orig); + ret = true; + } fnhe->fnhe_stamp = jiffies; - ret = true; } spin_unlock_bh(&fnhe_lock); @@ -1492,6 +1521,7 @@ static int __mkroute_input(struct sk_buff *skb, struct in_device *in_dev, __be32 daddr, __be32 saddr, u32 tos) { + struct fib_nh_exception *fnhe; struct rtable *rth; int err; struct in_device *out_dev; @@ -1538,8 +1568,13 @@ static int __mkroute_input(struct sk_buff *skb, } } + fnhe = find_exception(&FIB_RES_NH(*res), daddr); if (do_cache) { - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); + if (fnhe != NULL) + rth = rcu_dereference(fnhe->fnhe_rth_input); + else + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); + if (rt_cache_valid(rth)) { skb_dst_set_noref(skb, &rth->dst); goto out; @@ -1567,7 +1602,7 @@ static int __mkroute_input(struct sk_buff *skb, rth->dst.input = ip_forward; rth->dst.output = ip_output; - rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag); + rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag); skb_dst_set(skb, &rth->dst); out: err = 0; @@ -1882,7 +1917,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, fnhe = find_exception(nh, fl4->daddr); if (fnhe) - prth = &fnhe->fnhe_rth; + prth = &fnhe->fnhe_rth_output; else { if (unlikely(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH && -- cgit v1.2.3 From 496e4ae7dc944faa1721bfda7e9d834d5611a874 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 29 Jun 2013 14:15:47 +0200 Subject: netfilter: nf_queue: add NFQA_SKB_CSUM_NOTVERIFIED info flag The common case is that TCP/IP checksums have already been verified, e.g. by hardware (rx checksum offload), or conntrack. Userspace can use this flag to determine when the checksum has not been validated yet. If the flag is set, this doesn't necessarily mean that the packet has an invalid checksum, e.g. if the NIC doesn't support rx checksum offload. Userspace that successfully enabled the NFQA_CFG_F_GSO queue feature flag can infer that the IP/TCP checksum has already been validated if either the SKB_INFO attribute is not present or the NFQA_SKB_CSUM_NOTVERIFIED flag is unset. 
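For a userspace consumer this boils down to a small test on the NFQA_SKB_INFO flags. A minimal sketch (the helper name is made up; it assumes the attribute payload has already been extracted into a host-order value, with 0 standing in for an absent attribute, and that NFQA_CFG_F_GSO was enabled as described above):

#include <stdbool.h>
#include <linux/netfilter/nfnetlink_queue.h>

static bool csum_already_verified(__u32 skb_info_flags)
{
	/* set when the kernel has not validated the checksum yet,
	 * e.g. because the NIC lacks rx checksum offload
	 */
	if (skb_info_flags & NFQA_SKB_CSUM_NOTVERIFIED)
		return false;
	/* CHECKSUM_PARTIAL: the checksum still has to be computed */
	if (skb_info_flags & NFQA_SKB_CSUMNOTREADY)
		return false;
	/* flags unset or attribute absent: already verified */
	return true;
}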
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nfnetlink_queue.h | 2 ++ net/netfilter/nfnetlink_queue_core.c | 16 ++++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index a2308ae5a73d..3a9b92147339 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -105,5 +105,7 @@ enum nfqnl_attr_config { #define NFQA_SKB_CSUMNOTREADY (1 << 0) /* packet is GSO (i.e., exceeds device mtu) */ #define NFQA_SKB_GSO (1 << 1) +/* csum not validated (incoming device doesn't support hw checksum, etc.) */ +#define NFQA_SKB_CSUM_NOTVERIFIED (1 << 2) #endif /* _NFNETLINK_QUEUE_H */ diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 299a48ae5dc9..971ea145ab3e 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -280,12 +280,17 @@ nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) skb_shinfo(to)->nr_frags = j; } -static int nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet) +static int +nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet, + bool csum_verify) { __u32 flags = 0; if (packet->ip_summed == CHECKSUM_PARTIAL) flags = NFQA_SKB_CSUMNOTREADY; + else if (csum_verify) + flags = NFQA_SKB_CSUM_NOTVERIFIED; + if (skb_is_gso(packet)) flags |= NFQA_SKB_GSO; @@ -310,6 +315,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, struct net_device *outdev; struct nf_conn *ct = NULL; enum ip_conntrack_info uninitialized_var(ctinfo); + bool csum_verify; size = nlmsg_total_size(sizeof(struct nfgenmsg)) + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) @@ -327,6 +333,12 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, if (entskb->tstamp.tv64) size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); + if (entry->hook <= NF_INET_FORWARD || + (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL)) + csum_verify = !skb_csum_unnecessary(entskb); + else + csum_verify = false; + outdev = entry->outdev; switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { @@ -476,7 +488,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len))) goto nla_put_failure; - if (nfqnl_put_packet_info(skb, entskb)) + if (nfqnl_put_packet_info(skb, entskb, csum_verify)) goto nla_put_failure; if (data_len) { -- cgit v1.2.3 From b01978cacfd7e3a4ca703b0e48f2e18de8865df5 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 27 Jun 2013 19:05:21 +0300 Subject: net/mlx4_core: Dynamic VST to VST vlan/qos changes Within VST mode, enable modifying the vlan and/or qos for a VF without requiring unbind/rebind. This requires firmware which supports the UPDATE_QP command. (If the command is not available, we fall back to requiring unbind/bind to activate these changes). To avoid race conditions with modify-qp on QPs that are affected by update-qp, this operation is performed on the comm_wq. If the update operation succeeds for all the necessary QPs, a vlan_unregister is performed for the abandoned vlan id. Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 122 ++++++++++++++++- drivers/net/ethernet/mellanox/mlx4/fw.c | 5 +- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 20 +++ .../net/ethernet/mellanox/mlx4/resource_tracker.c | 145 ++++++++++++++++++++- include/linux/mlx4/cmd.h | 1 + include/linux/mlx4/device.h | 3 +- include/linux/mlx4/qp.h | 34 +++++ 7 files changed, 319 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index df04c8206ebe..7b927891dc35 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -112,6 +112,14 @@ enum { GO_BIT_TIMEOUT_MSECS = 10000 }; +enum mlx4_vlan_transition { + MLX4_VLAN_TRANSITION_VST_VST = 0, + MLX4_VLAN_TRANSITION_VST_VGT = 1, + MLX4_VLAN_TRANSITION_VGT_VST = 2, + MLX4_VLAN_TRANSITION_VGT_VGT = 3, +}; + + struct mlx4_cmd_context { struct completion done; int result; @@ -792,6 +800,15 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } +int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return -EPERM; +} + int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1225,6 +1242,15 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = mlx4_GEN_QP_wrapper }, + { + .opcode = MLX4_CMD_UPDATE_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = MLX4_CMD_UPDATE_QP_wrapper + }, { .opcode = MLX4_CMD_CONF_SPECIAL_QP, .has_inbox = false, @@ -1495,6 +1521,72 @@ out: return ret; } + +int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, + int slave, int port) +{ + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_vport_state *vp_admin; + struct mlx4_vf_immed_vlan_work *work; + int err; + int admin_vlan_ix = NO_INDX; + + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + if (vp_oper->state.default_vlan == vp_admin->default_vlan && + vp_oper->state.default_qos == vp_admin->default_qos) + return 0; + + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return -ENOMEM; + + if (vp_oper->state.default_vlan != vp_admin->default_vlan) { + err = __mlx4_register_vlan(&priv->dev, port, + vp_admin->default_vlan, + &admin_vlan_ix); + if (err) { + mlx4_warn((&priv->dev), + "No vlan resources slave %d, port %d\n", + slave, port); + return err; + } + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; + mlx4_dbg((&(priv->dev)), + "alloc vlan %d idx %d slave %d port %d\n", + (int)(vp_admin->default_vlan), + admin_vlan_ix, slave, port); + } + + /* save original vlan ix and vlan id */ + work->orig_vlan_id = vp_oper->state.default_vlan; + work->orig_vlan_ix = vp_oper->vlan_idx; + + /* handle new qos */ + if (vp_oper->state.default_qos != vp_admin->default_qos) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS; + + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN) + vp_oper->vlan_idx = admin_vlan_ix; + + vp_oper->state.default_vlan = vp_admin->default_vlan; + vp_oper->state.default_qos = vp_admin->default_qos; + + /* iterate over QPs owned by this slave, using UPDATE_QP */ + work->port = port; + work->slave = slave; + work->qos = vp_oper->state.default_qos; + work->vlan_id = vp_oper->state.default_vlan; + 
work->vlan_ix = vp_oper->vlan_idx; + work->priv = priv; + INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler); + queue_work(priv->mfunc.master.comm_wq, &work->work); + + return 0; +} + + static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) { int port, err; @@ -2109,11 +2201,18 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) } EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); +static int calculate_transition(u16 oper_vlan, u16 admin_vlan) +{ + return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); +} + int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_vport_state *s_info; + struct mlx4_vport_oper_state *vf_oper; + struct mlx4_vport_state *vf_admin; int slave; + enum mlx4_vlan_transition vlan_trans; if ((!mlx4_is_master(dev)) || !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL)) @@ -2126,12 +2225,25 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) if (slave < 0) return -EINVAL; - s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + if ((0 == vlan) && (0 == qos)) - s_info->default_vlan = MLX4_VGT; + vf_admin->default_vlan = MLX4_VGT; else - s_info->default_vlan = vlan; - s_info->default_qos = qos; + vf_admin->default_vlan = vlan; + vf_admin->default_qos = qos; + + vlan_trans = calculate_transition(vf_oper->state.default_vlan, + vf_admin->default_vlan); + + if (priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP && + vlan_trans == MLX4_VLAN_TRANSITION_VST_VST) { + mlx4_info(dev, "updating vf %d port %d config params immediately\n", + vf, port); + mlx4_master_immediate_activate_vlan_qos(priv, slave, port); + } return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 569bbe3e7403..8873d6802c80 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -133,7 +133,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [4] = "Automatic MAC reassignment support", [5] = "Time stamping support", [6] = "VST (control vlan insertion/stripping) support", - [7] = "FSM (MAC anti-spoofing) support" + [7] = "FSM (MAC anti-spoofing) support", + [8] = "Dynamic QP updates support" }; int i; @@ -659,6 +660,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + if (field32 & (1 << 16)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; if (field32 & (1 << 26)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; if (field32 & (1 << 20)) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 75272935a3f7..5abcb6501e30 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -571,6 +571,24 @@ struct mlx4_cmd { u8 comm_toggle; }; +enum { + MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0, + MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1, +}; +struct mlx4_vf_immed_vlan_work { + struct work_struct work; + struct mlx4_priv *priv; + int flags; + int slave; + int vlan_ix; + int orig_vlan_ix; + u8 port; + u8 qos; + u16 vlan_id; + u16 orig_vlan_id; +}; + + struct mlx4_uar_table { struct mlx4_bitmap bitmap; }; @@ -1218,4 +1236,6 @@ 
static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev) #define NOT_MASKED_PD_BITS 17 +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); + #endif /* MLX4_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1157f028a90f..46323a20060c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -101,6 +101,8 @@ struct res_qp { spinlock_t mcg_spl; int local_qpn; atomic_t ref_count; + u32 qpc_flags; + u8 sched_queue; }; enum res_mtt_states { @@ -355,7 +357,7 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, - u8 slave) + u8 slave, u32 qpn) { struct mlx4_qp_context *qpc = inbox->buf + 8; struct mlx4_vport_oper_state *vp_oper; @@ -369,9 +371,17 @@ static int update_vport_qp_param(struct mlx4_dev *dev, if (MLX4_VGT != vp_oper->state.default_vlan) { qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; - if (MLX4_QP_ST_RC == qp_type) + if (MLX4_QP_ST_RC == qp_type || + (MLX4_QP_ST_UD == qp_type && + !mlx4_is_qp_reserved(dev, qpn))) return -EINVAL; + /* the reserved QPs (special, proxy, tunnel) + * do not operate over vlans + */ + if (mlx4_is_qp_reserved(dev, qpn)) + return 0; + /* force strip vlan by clear vsd */ qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); if (0 != vp_oper->state.default_vlan) { @@ -2114,6 +2124,8 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, if (err) return err; qp->local_qpn = local_qpn; + qp->sched_queue = 0; + qp->qpc_flags = be32_to_cpu(qpc->flags); err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); if (err) @@ -2836,6 +2848,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, { int err; struct mlx4_qp_context *qpc = inbox->buf + 8; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + u8 orig_sched_queue; err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); if (err) @@ -2844,11 +2859,30 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, update_pkey_index(dev, slave, inbox); update_gid(dev, inbox, (u8)slave); adjust_proxy_tun_qkey(dev, vhcr, qpc); - err = update_vport_qp_param(dev, inbox, slave); + orig_sched_queue = qpc->pri_path.sched_queue; + err = update_vport_qp_param(dev, inbox, slave, qpn); if (err) return err; - return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + err = get_res(dev, slave, qpn, RES_QP, &qp); + if (err) + return err; + if (qp->com.from_state != RES_QP_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + /* if no error, save sched queue value passed in by VF. This is + * essentially the QOS value provided by the VF. 
This will be useful + * if we allow dynamic changes from VST back to VGT + */ + if (!err) + qp->sched_queue = orig_sched_queue; + + put_res(dev, slave, qpn, RES_QP); + return err; } int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, @@ -3932,3 +3966,106 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) rem_slave_xrcdns(dev, slave); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); } + +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) +{ + struct mlx4_vf_immed_vlan_work *work = + container_of(_work, struct mlx4_vf_immed_vlan_work, work); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *upd_context; + struct mlx4_dev *dev = &work->priv->dev; + struct mlx4_resource_tracker *tracker = + &work->priv->mfunc.master.res_tracker; + struct list_head *qp_list = + &tracker->slave_list[work->slave].res_list[RES_QP]; + struct res_qp *qp; + struct res_qp *tmp; + u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) | + (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE)); + + int err; + int port, errors = 0; + u8 vlan_control; + + if (mlx4_is_slave(dev)) { + mlx4_warn(dev, "Trying to update-qp in slave %d\n", + work->slave); + goto out; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + goto out; + + if (!work->vlan_id) + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + + upd_context = mailbox->buf; + upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask); + upd_context->qp_context.pri_path.vlan_control = vlan_control; + upd_context->qp_context.pri_path.vlan_index = work->vlan_ix; + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(qp, tmp, qp_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (qp->com.owner == work->slave) { + if (qp->com.from_state != RES_QP_HW || + !qp->sched_queue || /* no INIT2RTR trans yet */ + mlx4_is_qp_reserved(dev, qp->local_qpn) || + qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + port = (qp->sched_queue >> 6 & 1) + 1; + if (port != work->port) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + upd_context->qp_context.pri_path.sched_queue = + qp->sched_queue & 0xC7; + upd_context->qp_context.pri_path.sched_queue |= + ((work->qos & 0x7) << 3); + + err = mlx4_cmd(dev, mailbox->dma, + qp->local_qpn & 0xffffff, + 0, MLX4_CMD_UPDATE_QP, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + if (err) { + mlx4_info(dev, "UPDATE_QP failed for slave %d, " + "port %d, qpn %d (%d)\n", + work->slave, port, qp->local_qpn, + err); + errors++; + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_free_cmd_mailbox(dev, mailbox); + + if (errors) + mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n", + errors, work->slave, work->port); + + /* unregister previous vlan_id if needed and we had no errors + * while updating the QPs + */ + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && + NO_INDX != work->orig_vlan_ix) + 
__mlx4_unregister_vlan(&work->priv->dev, work->port, + work->orig_vlan_ix); +out: + kfree(work); + return; +} diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 8074a9711cf1..bb1c8096a7eb 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -111,6 +111,7 @@ enum { MLX4_CMD_INIT2INIT_QP = 0x2d, MLX4_CMD_SUSPEND_QP = 0x32, MLX4_CMD_UNSUSPEND_QP = 0x33, + MLX4_CMD_UPDATE_QP = 0x61, /* special QP and management commands */ MLX4_CMD_CONF_SPECIAL_QP = 0x23, MLX4_CMD_MAD_IFC = 0x24, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a51b0134ce18..52c23a892bab 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -157,7 +157,8 @@ enum { MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4, MLX4_DEV_CAP_FLAG2_TS = 1LL << 5, MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6, - MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7 + MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7, + MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8 }; enum { diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 352eec9df1b8..f43e32aa054a 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -206,6 +206,40 @@ struct mlx4_qp_context { u32 reserved5[10]; }; +struct mlx4_update_qp_context { + __be64 qp_mask; + __be64 primary_addr_path_mask; + __be64 secondary_addr_path_mask; + u64 reserved1; + struct mlx4_qp_context qp_context; + u64 reserved2[58]; +}; + +enum { + MLX4_UPD_QP_MASK_PM_STATE = 32, + MLX4_UPD_QP_MASK_VSD = 33, +}; + +enum { + MLX4_UPD_QP_PATH_MASK_PKEY_INDEX = 0 + 32, + MLX4_UPD_QP_PATH_MASK_FSM = 1 + 32, + MLX4_UPD_QP_PATH_MASK_MAC_INDEX = 2 + 32, + MLX4_UPD_QP_PATH_MASK_FVL = 3 + 32, + MLX4_UPD_QP_PATH_MASK_CV = 4 + 32, + MLX4_UPD_QP_PATH_MASK_VLAN_INDEX = 5 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN = 6 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED = 7 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P = 8 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED = 9 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED = 10 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P = 11 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED = 12 + 32, + MLX4_UPD_QP_PATH_MASK_FEUP = 13 + 32, + MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32, + MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32, + MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32, +}; + enum { /* param3 */ MLX4_STRIP_VLAN = 1 << 30 }; -- cgit v1.2.3 From 0a6eac24583848490e9a9c02daef5e33c997431f Mon Sep 17 00:00:00 2001 From: Rony Efraim Date: Thu, 27 Jun 2013 19:05:22 +0300 Subject: net/mlx4_core: Add HW enforcement to VF link state When the firmware supports the UPDATE_QP command, if the VF link is disabled, block all QPs opened by the VF, by programming the UPDATE_QP command to drop all RX & TX traffic to/from these QPs. Operates only in VST mode. Signed-off-by: Rony Efraim Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 57 ++++++++++++++-------- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1 + .../net/ethernet/mellanox/mlx4/resource_tracker.c | 22 +++++++-- include/linux/mlx4/qp.h | 2 + 4 files changed, 60 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 7b927891dc35..707a7d0d08e1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1521,6 +1521,10 @@ out: return ret; } +static int calculate_transition(u16 oper_vlan, u16 admin_vlan) +{ + return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); +} int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, int slave, int port) @@ -1528,16 +1532,37 @@ int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, struct mlx4_vport_oper_state *vp_oper; struct mlx4_vport_state *vp_admin; struct mlx4_vf_immed_vlan_work *work; + struct mlx4_dev *dev = &(priv->dev); int err; int admin_vlan_ix = NO_INDX; + enum mlx4_vlan_transition vlan_trans; vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; if (vp_oper->state.default_vlan == vp_admin->default_vlan && - vp_oper->state.default_qos == vp_admin->default_qos) + vp_oper->state.default_qos == vp_admin->default_qos && + vp_oper->state.link_state == vp_admin->link_state) return 0; + vlan_trans = calculate_transition(vp_oper->state.default_vlan, + vp_admin->default_vlan); + + if (!(priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP && + vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) { + /* even if the UPDATE_QP command isn't supported, we still want + * to set this VF link according to the admin directive + */ + vp_oper->state.link_state = vp_admin->link_state; + return -1; + } + + mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", + slave, port); + mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan, + vp_admin->default_qos, vp_admin->link_state); + work = kzalloc(sizeof(*work), GFP_KERNEL); if (!work) return -ENOMEM; @@ -1572,6 +1597,10 @@ int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, vp_oper->state.default_vlan = vp_admin->default_vlan; vp_oper->state.default_qos = vp_admin->default_qos; + vp_oper->state.link_state = vp_admin->link_state; + + if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE; /* iterate over QPs owned by this slave, using UPDATE_QP */ work->port = port; @@ -2201,10 +2230,6 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) } EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); -static int calculate_transition(u16 oper_vlan, u16 admin_vlan) -{ - return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); -} int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) { @@ -2212,7 +2237,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) struct mlx4_vport_oper_state *vf_oper; struct mlx4_vport_state *vf_admin; int slave; - enum mlx4_vlan_transition vlan_trans; if ((!mlx4_is_master(dev)) || !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL)) @@ -2234,16 +2258,10 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) vf_admin->default_vlan = vlan; vf_admin->default_qos = qos; - vlan_trans = calculate_transition(vf_oper->state.default_vlan, - 
vf_admin->default_vlan); - - if (priv->mfunc.master.slave_state[slave].active && - dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP && - vlan_trans == MLX4_VLAN_TRANSITION_VST_VST) { - mlx4_info(dev, "updating vf %d port %d config params immediately\n", + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_info(dev, + "updating vf %d port %d config will take effect on next VF restart\n", vf, port); - mlx4_master_immediate_activate_vlan_qos(priv, slave, port); - } return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); @@ -2307,7 +2325,6 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_vport_state *s_info; - struct mlx4_vport_oper_state *vp_oper; int slave; u8 link_stat_event; @@ -2337,14 +2354,16 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat link_state, slave, port); return -EINVAL; }; - /* update the admin & oper state on the link state */ s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; - vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; s_info->link_state = link_state; - vp_oper->state.link_state = link_state; /* send event */ mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event); + + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_dbg(dev, + "updating vf %d port %d no link state HW enforcment\n", + vf, port); return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 5abcb6501e30..17d9277e33ef 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -574,6 +574,7 @@ struct mlx4_cmd { enum { MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0, MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1, + MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE = 1 << 2, }; struct mlx4_vf_immed_vlan_work { struct work_struct work; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 46323a20060c..f984a89c27df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -384,7 +384,17 @@ static int update_vport_qp_param(struct mlx4_dev *dev, /* force strip vlan by clear vsd */ qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); - if (0 != vp_oper->state.default_vlan) { + + if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } else if (0 != vp_oper->state.default_vlan) { qpc->pri_path.vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | @@ -4002,8 +4012,14 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) goto out; - - if (!work->vlan_id) + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */ + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else if (!work->vlan_id) vlan_control = 
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; else diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index f43e32aa054a..262deac02c9e 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -152,6 +152,8 @@ enum { /* fl */ }; enum { /* vlan_control */ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */ + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED = 1 << 4, MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2, MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0 -- cgit v1.2.3 From ad6276e0fe724b1c8ac3a0bf138d151665ab2349 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Fri, 28 Jun 2013 15:59:26 +0300 Subject: net: fix LLS debug_smp_processor_id() warning Our use of sched_clock is OK because we don't mind the side effects of calling it and occasionally waking up on a different CPU. When CONFIG_DEBUG_PREEMPT is on, disable preempt before calling sched_clock() so we don't trigger a debug_smp_processor_id() warning. Reported-by: Cody P Schafer Signed-off-by: Eliezer Tamir Signed-off-by: David S. Miller --- include/net/ll_poll.h | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index 5bf2b3a6129e..6d45e6f6ff4c 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -37,20 +37,40 @@ extern unsigned int sysctl_net_ll_poll __read_mostly; #define LL_FLUSH_FAILED -1 #define LL_FLUSH_BUSY -2 -/* we can use sched_clock() because we don't care much about precision +/* a wrapper to make debug_smp_processor_id() happy + * we can use sched_clock() because we don't care much about precision * we only care that the average is bounded - * we don't mind a ~2.5% imprecision so <<10 instead of *1000 + */ +#ifdef CONFIG_DEBUG_PREEMPT +static inline u64 ll_sched_clock(void) +{ + u64 rc; + + preempt_disable_notrace(); + rc = sched_clock(); + preempt_enable_no_resched_notrace(); + + return rc; +} +#else /* CONFIG_DEBUG_PREEMPT */ +static inline u64 ll_sched_clock(void) +{ + return sched_clock(); +} +#endif /* CONFIG_DEBUG_PREEMPT */ + +/* we don't mind a ~2.5% imprecision so <<10 instead of *1000 * sk->sk_ll_usec is a u_int so this can't overflow */ static inline u64 ll_sk_end_time(struct sock *sk) { - return ((u64)ACCESS_ONCE(sk->sk_ll_usec) << 10) + sched_clock(); + return ((u64)ACCESS_ONCE(sk->sk_ll_usec) << 10) + ll_sched_clock(); } /* in poll/select we use the global sysctl_net_ll_poll value */ static inline u64 ll_end_time(void) { - return ((u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10) + sched_clock(); + return ((u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10) + ll_sched_clock(); } static inline bool sk_valid_ll(struct sock *sk) @@ -61,7 +81,7 @@ static inline bool sk_valid_ll(struct sock *sk) static inline bool can_poll_ll(u64 end_time) { - return !time_after64(sched_clock(), end_time); + return !time_after64(ll_sched_clock(), end_time); } /* when used in sock_poll() nonblock is known at compile time to be true -- cgit v1.2.3 From 91e2fd337839319c7745e2cb84cfbf8cf1426a1a Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Fri, 28 Jun 2013 15:59:35 +0300 Subject: net: avoid calling sched_clock when LLS is off Change Low Latency Sockets code for select and poll so that when LLS is disabled sched_clock() is never called. Also, avoid sending POLL_LL to sockets if disabled. 
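Condensed, the gating in do_select()/do_poll() after this change looks roughly as follows (abridged from the fs/select.c and ll_poll.h hunks below, not a literal quote):

	unsigned int ll_flag = ll_get_flag();	/* 0 unless sysctl_net_ll_poll is set */
	u64 ll_time = ll_end_time();		/* 0 when disabled, so sched_clock() is never called */

	/* inside the retry loop: busy-poll only if the feature is on, at least
	 * one socket reported POLL_LL and the time budget has not run out
	 */
	if (ll_flag && can_ll && can_poll_ll(ll_time))
		continue;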
Reported-by: Andi Kleen Signed-off-by: Eliezer Tamir Signed-off-by: David S. Miller --- fs/select.c | 11 +++++++---- include/net/ll_poll.h | 17 +++++++++++++++-- 2 files changed, 22 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/fs/select.c b/fs/select.c index 79b876eb91da..36540754bad7 100644 --- a/fs/select.c +++ b/fs/select.c @@ -402,7 +402,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) poll_table *wait; int retval, i, timed_out = 0; unsigned long slack = 0; - unsigned int ll_flag = POLL_LL; + unsigned int ll_flag = ll_get_flag(); u64 ll_time = ll_end_time(); rcu_read_lock(); @@ -497,7 +497,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) break; } - if (can_ll && can_poll_ll(ll_time)) + /* only if on, have sockets with POLL_LL and not out of time */ + if (ll_flag && can_ll && can_poll_ll(ll_time)) continue; /* @@ -768,7 +769,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, ktime_t expire, *to = NULL; int timed_out = 0, count = 0; unsigned long slack = 0; - unsigned int ll_flag = POLL_LL; + unsigned int ll_flag = ll_get_flag(); u64 ll_time = ll_end_time(); /* Optimise the no-wait case */ @@ -817,8 +818,10 @@ static int do_poll(unsigned int nfds, struct poll_list *list, if (count || timed_out) break; - if (can_ll && can_poll_ll(ll_time)) + /* only if on, have sockets with POLL_LL and not out of time */ + if (ll_flag && can_ll && can_poll_ll(ll_time)) continue; + /* * If this is the first loop and we have a timeout * given, then we convert to ktime_t and set the to diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index 6d45e6f6ff4c..6c06f7c0a22d 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -37,6 +37,11 @@ extern unsigned int sysctl_net_ll_poll __read_mostly; #define LL_FLUSH_FAILED -1 #define LL_FLUSH_BUSY -2 +static inline unsigned int ll_get_flag(void) +{ + return sysctl_net_ll_poll ? POLL_LL : 0; +} + /* a wrapper to make debug_smp_processor_id() happy * we can use sched_clock() because we don't care much about precision * we only care that the average is bounded @@ -67,10 +72,14 @@ static inline u64 ll_sk_end_time(struct sock *sk) return ((u64)ACCESS_ONCE(sk->sk_ll_usec) << 10) + ll_sched_clock(); } -/* in poll/select we use the global sysctl_net_ll_poll value */ +/* in poll/select we use the global sysctl_net_ll_poll value + * only call sched_clock() if enabled + */ static inline u64 ll_end_time(void) { - return ((u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10) + ll_sched_clock(); + u64 end_time = ACCESS_ONCE(sysctl_net_ll_poll); + + return end_time ? (end_time << 10) + ll_sched_clock() : 0; } static inline bool sk_valid_ll(struct sock *sk) @@ -141,6 +150,10 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) } #else /* CONFIG_NET_LL_RX_POLL */ +static inline unsigned long ll_get_flag(void) +{ + return 0; +} static inline u64 sk_ll_end_time(struct sock *sk) { -- cgit v1.2.3 From b1a5a34bd0b8767ea689e68f8ea513e9710b671e Mon Sep 17 00:00:00 2001 From: Changli Gao Date: Sat, 29 Jun 2013 00:15:51 +0800 Subject: net: Swap ver and type in pppoe_hdr Ver and type in pppoe_hdr should be swapped as defined by RFC2516 section-4. Signed-off-by: David S. 
Miller --- include/uapi/linux/if_pppox.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h index 0b46fd57c8f6..e36a4aecd311 100644 --- a/include/uapi/linux/if_pppox.h +++ b/include/uapi/linux/if_pppox.h @@ -135,11 +135,11 @@ struct pppoe_tag { struct pppoe_hdr { #if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 ver : 4; __u8 type : 4; + __u8 ver : 4; #elif defined(__BIG_ENDIAN_BITFIELD) - __u8 type : 4; __u8 ver : 4; + __u8 type : 4; #else #error "Please fix " #endif -- cgit v1.2.3 From bb33381d0c97cdee25f2cdab540b6e2bd16fa03b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 28 Jun 2013 19:49:40 +0200 Subject: net: sctp: rework debugging framework to use pr_debug and friends We should get rid of all of SCTP's own debug printk macros and use the ones that the kernel offers anyway instead. This makes the code more readable, brings it in line with the rest of the kernel code, and offers all the features of dynamic debugging that pr_debug() et al. have, such as turning individual debug messages on and off at runtime through debugfs. The runtime cost of having CONFIG_DYNAMIC_DEBUG enabled, but none of the debug statements printing, is negligible [1]. If kernel debugging is completely turned off, then these statements will also compile into "empty" functions. While we're at it, we also need to change the Kconfig option as it /now/ only refers to the ifdef'ed code portions in outqueue.c that enable further debugging/tracing of SCTP transaction fields. Also, since the SCTP_ASSERT code was enabled with this Kconfig option and has now been removed, we transform those code parts into WARNs or, where appropriate, BUG_ONs so that such bugs can be detected more easily, as probably not many people have SCTP debugging permanently turned on. To turn on all SCTP debugging, the following steps are needed: # mount -t debugfs none /sys/kernel/debug # echo -n 'module sctp +p' > /sys/kernel/debug/dynamic_debug/control This can be done at a finer granularity as well, e.g. per file or per line, as described in [2]. [1] https://www.kernel.org/doc/ols/2009/ols2009-pages-39-46.pdf [2] Documentation/dynamic-debug-howto.txt Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 75 --------------------- net/sctp/Kconfig | 12 ++-- net/sctp/associola.c | 67 ++++++++++--------- net/sctp/chunk.c | 5 +- net/sctp/debug.c | 4 -- net/sctp/endpointola.c | 5 +- net/sctp/input.c | 6 +- net/sctp/inqueue.c | 9 ++- net/sctp/ipv6.c | 21 +++--- net/sctp/output.c | 40 +++++------ net/sctp/outqueue.c | 160 ++++++++++++++++++++------------------ net/sctp/protocol.c | 48 +++++++------- net/sctp/sm_make_chunk.c | 25 +++---- net/sctp/sm_sideeffect.c | 96 +++++++++++---------- net/sctp/sm_statefuns.c | 77 +++++++++++++--------- net/sctp/socket.c | 168 ++++++++++++++++++++------------------- net/sctp/transport.c | 49 +++++++------- 17 files changed, 383 insertions(+), 484 deletions(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index e6b95bc4d8e6..d8e37ecea691 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -83,16 +83,6 @@ #include #include - -/* Set SCTP_DEBUG flag via config if not already set. */ -#ifndef SCTP_DEBUG -#ifdef CONFIG_SCTP_DBG_MSG -#define SCTP_DEBUG 1 -#else -#define SCTP_DEBUG 0 -#endif /* CONFIG_SCTP_DBG */ -#endif /* SCTP_DEBUG */ - #ifdef CONFIG_IP_SCTP_MODULE #define SCTP_PROTOSW_FLAG 0 #else /* static! 
*/ @@ -270,61 +260,6 @@ static inline void sctp_max_rto(struct sctp_association *asoc, } } -/* Print debugging messages. */ -#if SCTP_DEBUG -extern int sctp_debug_flag; -#define SCTP_DEBUG_PRINTK(fmt, args...) \ -do { \ - if (sctp_debug_flag) \ - printk(KERN_DEBUG pr_fmt(fmt), ##args); \ -} while (0) -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) \ -do { \ - if (sctp_debug_flag) \ - pr_cont(fmt, ##args); \ -} while (0) -#define SCTP_DEBUG_PRINTK_IPADDR(fmt_lead, fmt_trail, \ - args_lead, addr, args_trail...) \ -do { \ - const union sctp_addr *_addr = (addr); \ - if (sctp_debug_flag) { \ - if (_addr->sa.sa_family == AF_INET6) { \ - printk(KERN_DEBUG \ - pr_fmt(fmt_lead "%pI6" fmt_trail), \ - args_lead, \ - &_addr->v6.sin6_addr, \ - args_trail); \ - } else { \ - printk(KERN_DEBUG \ - pr_fmt(fmt_lead "%pI4" fmt_trail), \ - args_lead, \ - &_addr->v4.sin_addr.s_addr, \ - args_trail); \ - } \ - } \ -} while (0) -#define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; } -#define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; } - -#define SCTP_ASSERT(expr, str, func) \ - if (!(expr)) { \ - SCTP_DEBUG_PRINTK("Assertion Failed: %s(%s) at %s:%s:%d\n", \ - str, (#expr), __FILE__, __func__, __LINE__); \ - func; \ - } - -#else /* SCTP_DEBUG */ - -#define SCTP_DEBUG_PRINTK(whatever...) -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) -#define SCTP_ENABLE_DEBUG -#define SCTP_DISABLE_DEBUG -#define SCTP_ASSERT(expr, str, func) - -#endif /* SCTP_DEBUG */ - - /* * Macros for keeping a global reference of object allocations. */ @@ -597,16 +532,6 @@ static inline int param_type2af(__be16 type) } } -/* Perform some sanity checks. */ -static inline int sctp_sanity_check(void) -{ - SCTP_ASSERT(sizeof(struct sctp_ulpevent) <= - sizeof(((struct sk_buff *)0)->cb), - "SCTP: ulpevent does not fit in skb!\n", return 0); - - return 1; -} - /* Warning: The following hash functions assume a power of two 'size'. */ /* This is the hash function for the SCTP port hash table. */ static inline int sctp_phashfn(struct net *net, __u16 lport) diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index cf4852814e0c..d80bf1aebaed 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig @@ -30,7 +30,8 @@ menuconfig IP_SCTP homing at either or both ends of an association." To compile this protocol support as a module, choose M here: the - module will be called sctp. + module will be called sctp. Debug messages are handeled by the + kernel's dynamic debugging framework. If in doubt, say N. @@ -48,13 +49,14 @@ config NET_SCTPPROBE To compile this code as a module, choose M here: the module will be called sctp_probe. -config SCTP_DBG_MSG - bool "SCTP: Debug messages" +config SCTP_DBG_TSNS + bool "SCTP: Debug transactions" help - If you say Y, this will enable verbose debugging messages. + If you say Y, this will enable transaction debugging, visible + from the kernel's dynamic debugging framework. If unsure, say N. However, if you are running into problems, use - this option to gather detailed trace information + this option to gather outqueue trace information. 
config SCTP_DBG_OBJCNT bool "SCTP: Debug object counts" diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 9a383a8774e8..bce5b79662a6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -357,7 +357,8 @@ struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, goto fail_init; SCTP_DBG_OBJCNT_INC(assoc); - SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc); + + pr_debug("Created asoc %p\n", asoc); return asoc; @@ -455,7 +456,10 @@ void sctp_association_free(struct sctp_association *asoc) /* Cleanup and free up an association. */ static void sctp_association_destroy(struct sctp_association *asoc) { - SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return); + if (unlikely(!asoc->base.dead)) { + WARN(1, "Attempt to destroy undead association %p!\n", asoc); + return; + } sctp_endpoint_put(asoc->ep); sock_put(asoc->base.sk); @@ -536,11 +540,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, struct list_head *pos; struct sctp_transport *transport; - SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ", - " port: %d\n", - asoc, - (&peer->ipaddr), - ntohs(peer->ipaddr.v4.sin_port)); + pr_debug("%s: association:%p addr:%pISpc\n", + __func__, asoc, &peer->ipaddr.sa); /* If we are to remove the current retran_path, update it * to the next peer before removing this peer from the list. @@ -636,12 +637,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, /* AF_INET and AF_INET6 share common port field. */ port = ntohs(addr->v4.sin_port); - SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", - " port: %d state:%d\n", - asoc, - addr, - port, - peer_state); + pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__, + asoc, &addr->sa, peer_state); /* Set the port if it has not been set yet. */ if (0 == asoc->peer.port) @@ -708,8 +705,9 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, else asoc->pathmtu = peer->pathmtu; - SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to " - "%d\n", asoc, asoc->pathmtu); + pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc, + asoc->pathmtu); + peer->pmtu_pending = 0; asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); @@ -1349,12 +1347,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc) else t = asoc->peer.retran_path; - SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" - " %p addr: ", - " port: %d\n", - asoc, - (&t->ipaddr), - ntohs(t->ipaddr.v4.sin_port)); + pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc, + &t->ipaddr.sa); } /* Choose the transport for sending retransmit packet. */ @@ -1401,8 +1395,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) asoc->frag_point = sctp_frag_point(asoc, pmtu); } - SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", - __func__, asoc, asoc->pathmtu, asoc->frag_point); + pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc, + asoc->pathmtu, asoc->frag_point); } /* Should we send a SACK to update our peer? 
*/ @@ -1454,9 +1448,9 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) asoc->rwnd_press -= change; } - SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) " - "- %u\n", __func__, asoc, len, asoc->rwnd, - asoc->rwnd_over, asoc->a_rwnd); + pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", + __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, + asoc->a_rwnd); /* Send a window update SACK if the rwnd has increased by at least the * minimum of the association's PMTU and half of the receive buffer. @@ -1465,9 +1459,11 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) */ if (sctp_peer_needs_update(asoc)) { asoc->a_rwnd = asoc->rwnd; - SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p " - "rwnd: %u a_rwnd: %u\n", __func__, - asoc, asoc->rwnd, asoc->a_rwnd); + + pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " + "a_rwnd:%u\n", __func__, asoc, asoc->rwnd, + asoc->a_rwnd); + sack = sctp_make_sack(asoc); if (!sack) return; @@ -1489,8 +1485,10 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) int rx_count; int over = 0; - SCTP_ASSERT(asoc->rwnd, "rwnd zero", return); - SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return); + if (unlikely(!asoc->rwnd || asoc->rwnd_over)) + pr_debug("%s: association:%p has asoc->rwnd:%u, " + "asoc->rwnd_over:%u!\n", __func__, asoc, + asoc->rwnd, asoc->rwnd_over); if (asoc->ep->rcvbuf_policy) rx_count = atomic_read(&asoc->rmem_alloc); @@ -1515,9 +1513,10 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) asoc->rwnd_over = len - asoc->rwnd; asoc->rwnd = 0; } - SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n", - __func__, asoc, len, asoc->rwnd, - asoc->rwnd_over, asoc->rwnd_press); + + pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n", + __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, + asoc->rwnd_press); } /* Build the bind address list for the association based on info from the diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 7135fc0c087a..5780565f5b7d 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -193,8 +193,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, msg->expires_at = jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive); msg->can_abandon = 1; - SCTP_DEBUG_PRINTK("%s: msg:%p expires_at: %ld jiffies:%ld\n", - __func__, msg, msg->expires_at, jiffies); + + pr_debug("%s: msg:%p expires_at:%ld jiffies:%ld\n", __func__, + msg, msg->expires_at, jiffies); } /* This is the biggest possible DATA chunk that can fit into diff --git a/net/sctp/debug.c b/net/sctp/debug.c index ec997cfe0a7e..f4998780d6df 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -47,10 +47,6 @@ #include -#if SCTP_DEBUG -int sctp_debug_flag = 1; /* Initially enable DEBUG */ -#endif /* SCTP_DEBUG */ - /* These are printable forms of Chunk ID's from section 3.1. 
*/ static const char *const sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = { "DATA", diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index b26999d508ba..9e3d257de0e0 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -249,7 +249,10 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) { struct sock *sk; - SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); + if (unlikely(!ep->base.dead)) { + WARN(1, "Attempt to destroy undead endpoint %p!\n", ep); + return; + } /* Free the digest buffer */ kfree(ep->digest); diff --git a/net/sctp/input.c b/net/sctp/input.c index 4cfc74699a3f..3fa4d858c35a 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -454,8 +454,6 @@ void sctp_icmp_proto_unreachable(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t) { - SCTP_DEBUG_PRINTK("%s\n", __func__); - if (sock_owned_by_user(sk)) { if (timer_pending(&t->proto_unreach_timer)) return; @@ -464,10 +462,12 @@ void sctp_icmp_proto_unreachable(struct sock *sk, jiffies + (HZ/20))) sctp_association_hold(asoc); } - } else { struct net *net = sock_net(sk); + pr_debug("%s: unrecognized next header type " + "encountered!\n", __func__); + if (del_timer(&t->proto_unreach_timer)) sctp_association_put(asoc); diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index 3221d073448c..cb25f040fed0 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -219,10 +219,10 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) chunk->end_of_packet = 1; } - SCTP_DEBUG_PRINTK("+++sctp_inq_pop+++ chunk %p[%s]," - " length %d, skb->len %d\n",chunk, - sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), - ntohs(chunk->chunk_hdr->length), chunk->skb->len); + pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n", + chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), + ntohs(chunk->chunk_hdr->length), chunk->skb->len); + return chunk; } @@ -238,4 +238,3 @@ void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback) { INIT_WORK(&q->immediate, callback); } - diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index adeaa0e64f52..09ffcc912d23 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -239,9 +239,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) fl6.daddr = *rt0->addr; } - SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", - __func__, skb, skb->len, - &fl6.saddr, &fl6.daddr); + pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, + skb->len, &fl6.saddr, &fl6.daddr); SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); @@ -276,7 +275,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) fl6->flowi6_oif = daddr->v6.sin6_scope_id; - SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl6->daddr); + pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr); if (asoc) fl6->fl6_sport = htons(asoc->base.bind_addr.port); @@ -284,7 +283,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (saddr) { fl6->saddr = saddr->v6.sin6_addr; fl6->fl6_sport = saddr->v6.sin6_port; - SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr); + + pr_debug("src=%pI6 - ", &fl6->saddr); } dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); @@ -348,13 +348,16 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, out: if (!IS_ERR_OR_NULL(dst)) { struct rt6_info *rt; + rt = (struct rt6_info *)dst; t->dst = dst; - SCTP_DEBUG_PRINTK("rt6_dst:%pI6 rt6_src:%pI6\n", - &rt->rt6i_dst.addr, 
&fl6->saddr); + + pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr, + &fl6->saddr); } else { t->dst = NULL; - SCTP_DEBUG_PRINTK("NO ROUTE\n"); + + pr_debug("no route\n"); } } @@ -377,7 +380,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk, struct flowi6 *fl6 = &fl->u.ip6; union sctp_addr *saddr = &t->saddr; - SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p\n", __func__, t->asoc, t->dst); + pr_debug("%s: asoc:%p dst:%p\n", __func__, t->asoc, t->dst); if (t->dst) { saddr->v6.sin6_family = AF_INET6; diff --git a/net/sctp/output.c b/net/sctp/output.c index bbef4a7a9b56..a46d1eb41762 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -93,8 +93,7 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, { struct sctp_chunk *chunk = NULL; - SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, - packet, vtag); + pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); packet->vtag = vtag; @@ -119,8 +118,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, struct sctp_association *asoc = transport->asoc; size_t overhead; - SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __func__, - packet, transport); + pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport); packet->transport = transport; packet->source_port = sport; @@ -145,7 +143,7 @@ void sctp_packet_free(struct sctp_packet *packet) { struct sctp_chunk *chunk, *tmp; - SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet); + pr_debug("%s: packet:%p\n", __func__, packet); list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { list_del_init(&chunk->list); @@ -167,8 +165,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, sctp_xmit_t retval; int error = 0; - SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, - packet, chunk); + pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk); switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { case SCTP_XMIT_PMTU_FULL: @@ -334,8 +331,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, { sctp_xmit_t retval = SCTP_XMIT_OK; - SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet, - chunk); + pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk); /* Data chunks are special. Before seeing what else we can * bundle into this packet, check to see if we are allowed to @@ -402,7 +398,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) unsigned char *auth = NULL; /* pointer to auth in skb data */ __u32 cksum_buf_len = sizeof(struct sctphdr); - SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet); + pr_debug("%s: packet:%p\n", __func__, packet); /* Do NOT generate a chunkless packet. */ if (list_empty(&packet->chunk_list)) @@ -472,7 +468,9 @@ int sctp_packet_transmit(struct sctp_packet *packet) * * [This whole comment explains WORD_ROUND() below.] */ - SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n"); + + pr_debug("***sctp_transmit_packet***\n"); + list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { list_del_init(&chunk->list); if (sctp_chunk_is_data(chunk)) { @@ -505,16 +503,13 @@ int sctp_packet_transmit(struct sctp_packet *packet) memcpy(skb_put(nskb, chunk->skb->len), chunk->skb->data, chunk->skb->len); - SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n", - "*** Chunk", chunk, - sctp_cname(SCTP_ST_CHUNK( - chunk->chunk_hdr->type)), - chunk->has_tsn ? "TSN" : "No TSN", - chunk->has_tsn ? 
- ntohl(chunk->subh.data_hdr->tsn) : 0, - "length", ntohs(chunk->chunk_hdr->length), - "chunk->skb->len", chunk->skb->len, - "rtt_in_progress", chunk->rtt_in_progress); + pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, " + "rtt_in_progress:%d\n", chunk, + sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), + chunk->has_tsn ? "TSN" : "No TSN", + chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0, + ntohs(chunk->chunk_hdr->length), chunk->skb->len, + chunk->rtt_in_progress); /* * If this is a control chunk, this is our last @@ -606,8 +601,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) } } - SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n", - nskb->len); + pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len); nskb->local_df = packet->ipfragok; (*tp->af_specific->sctp_xmit)(nskb, tp); diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index be35e2dbcc9a..511b3b35d609 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -299,10 +299,10 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) struct net *net = sock_net(q->asoc->base.sk); int error = 0; - SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n", - q, chunk, chunk && chunk->chunk_hdr ? - sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) - : "Illegal Chunk"); + pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk, + chunk && chunk->chunk_hdr ? + sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : + "illegal chunk"); /* If it is data, queue it up, otherwise, send it * immediately. @@ -328,10 +328,10 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) break; default: - SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n", - q, chunk, chunk && chunk->chunk_hdr ? - sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) - : "Illegal Chunk"); + pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n", + __func__, q, chunk, chunk && chunk->chunk_hdr ? + sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : + "illegal chunk"); sctp_outq_tail_data(q, chunk); if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) @@ -460,14 +460,10 @@ void sctp_retransmit_mark(struct sctp_outq *q, } } - SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, " - "cwnd: %d, ssthresh: %d, flight_size: %d, " - "pba: %d\n", __func__, - transport, reason, - transport->cwnd, transport->ssthresh, - transport->flight_size, - transport->partial_bytes_acked); - + pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, " + "flight_size:%d, pba:%d\n", __func__, transport, reason, + transport->cwnd, transport->ssthresh, transport->flight_size, + transport->partial_bytes_acked); } /* Mark all the eligible packets on a transport for retransmission and force @@ -1014,19 +1010,13 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) sctp_transport_burst_limited(transport); } - SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ", - q, chunk, - chunk && chunk->chunk_hdr ? - sctp_cname(SCTP_ST_CHUNK( - chunk->chunk_hdr->type)) - : "Illegal Chunk"); - - SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head " - "%p skb->users %d.\n", - ntohl(chunk->subh.data_hdr->tsn), - chunk->skb ?chunk->skb->head : NULL, - chunk->skb ? - atomic_read(&chunk->skb->users) : -1); + pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p " + "skb->users:%d\n", + __func__, q, chunk, chunk && chunk->chunk_hdr ? + sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : + "illegal chunk", ntohl(chunk->subh.data_hdr->tsn), + chunk->skb ? chunk->skb->head : NULL, chunk->skb ? 
+ atomic_read(&chunk->skb->users) : -1); /* Add the chunk to the packet. */ status = sctp_packet_transmit_chunk(packet, chunk, 0); @@ -1038,10 +1028,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) /* We could not append this chunk, so put * the chunk back on the output queue. */ - SCTP_DEBUG_PRINTK("sctp_outq_flush: could " - "not transmit TSN: 0x%x, status: %d\n", - ntohl(chunk->subh.data_hdr->tsn), - status); + pr_debug("%s: could not transmit tsn:0x%x, status:%d\n", + __func__, ntohl(chunk->subh.data_hdr->tsn), + status); + sctp_outq_head_data(q, chunk); goto sctp_flush_out; break; @@ -1284,11 +1274,10 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) sctp_generate_fwdtsn(q, sack_ctsn); - SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n", - __func__, sack_ctsn); - SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, " - "%p is 0x%x. Adv peer ack point: 0x%x\n", - __func__, asoc, ctsn, asoc->adv_peer_ack_point); + pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn); + pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, " + "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, + asoc->adv_peer_ack_point); /* See if all chunks are acked. * Make sure the empty queue handler will get run later. @@ -1304,7 +1293,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) goto finish; } - SCTP_DEBUG_PRINTK("sack queue is empty.\n"); + pr_debug("%s: sack queue is empty\n", __func__); finish: return q->empty; } @@ -1348,7 +1337,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, /* These state variables are for coherent debug output. --xguo */ -#if SCTP_DEBUG +#ifdef CONFIG_SCTP_DBG_TSNS __u32 dbg_ack_tsn = 0; /* An ACKed TSN range starts here... */ __u32 dbg_last_ack_tsn = 0; /* ...and finishes here. */ __u32 dbg_kept_tsn = 0; /* An un-ACKed range starts here... */ @@ -1359,7 +1348,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, * -1: We need to initialize. */ int dbg_prt_state = -1; -#endif /* SCTP_DEBUG */ +#endif /* CONFIG_SCTP_DBG_TSNS */ sack_ctsn = ntohl(sack->cum_tsn_ack); @@ -1483,7 +1472,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, list_add_tail(lchunk, &tlist); } -#if SCTP_DEBUG +#ifdef CONFIG_SCTP_DBG_TSNS switch (dbg_prt_state) { case 0: /* last TSN was ACKed */ if (dbg_last_ack_tsn + 1 == tsn) { @@ -1497,42 +1486,39 @@ static void sctp_check_transmitted(struct sctp_outq *q, /* Display the end of the * current range. */ - SCTP_DEBUG_PRINTK_CONT("-%08x", - dbg_last_ack_tsn); + pr_cont("-%08x", dbg_last_ack_tsn); } /* Start a new range. */ - SCTP_DEBUG_PRINTK_CONT(",%08x", tsn); + pr_cont(",%08x", tsn); dbg_ack_tsn = tsn; break; case 1: /* The last TSN was NOT ACKed. */ if (dbg_last_kept_tsn != dbg_kept_tsn) { /* Display the end of current range. */ - SCTP_DEBUG_PRINTK_CONT("-%08x", - dbg_last_kept_tsn); + pr_cont("-%08x", dbg_last_kept_tsn); } - SCTP_DEBUG_PRINTK_CONT("\n"); - + pr_cont("\n"); /* FALL THROUGH... */ default: /* This is the first-ever TSN we examined. */ /* Start a new range of ACK-ed TSNs. 
*/ - SCTP_DEBUG_PRINTK("ACKed: %08x", tsn); + pr_debug("ACKed: %08x", tsn); + dbg_prt_state = 0; dbg_ack_tsn = tsn; } dbg_last_ack_tsn = tsn; -#endif /* SCTP_DEBUG */ +#endif /* CONFIG_SCTP_DBG_TSNS */ } else { if (tchunk->tsn_gap_acked) { - SCTP_DEBUG_PRINTK("%s: Receiver reneged on " - "data TSN: 0x%x\n", - __func__, - tsn); + pr_debug("%s: receiver reneged on data TSN:0x%x\n", + __func__, tsn); + tchunk->tsn_gap_acked = 0; if (tchunk->transport) @@ -1552,7 +1538,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, list_add_tail(lchunk, &tlist); -#if SCTP_DEBUG +#ifdef CONFIG_SCTP_DBG_TSNS /* See the above comments on ACK-ed TSNs. */ switch (dbg_prt_state) { case 1: @@ -1560,50 +1546,47 @@ static void sctp_check_transmitted(struct sctp_outq *q, break; if (dbg_last_kept_tsn != dbg_kept_tsn) - SCTP_DEBUG_PRINTK_CONT("-%08x", - dbg_last_kept_tsn); + pr_cont("-%08x", dbg_last_kept_tsn); - SCTP_DEBUG_PRINTK_CONT(",%08x", tsn); + pr_cont(",%08x", tsn); dbg_kept_tsn = tsn; break; case 0: if (dbg_last_ack_tsn != dbg_ack_tsn) - SCTP_DEBUG_PRINTK_CONT("-%08x", - dbg_last_ack_tsn); - SCTP_DEBUG_PRINTK_CONT("\n"); + pr_cont("-%08x", dbg_last_ack_tsn); + pr_cont("\n"); /* FALL THROUGH... */ default: - SCTP_DEBUG_PRINTK("KEPT: %08x",tsn); + pr_debug("KEPT: %08x", tsn); + dbg_prt_state = 1; dbg_kept_tsn = tsn; } dbg_last_kept_tsn = tsn; -#endif /* SCTP_DEBUG */ +#endif /* CONFIG_SCTP_DBG_TSNS */ } } -#if SCTP_DEBUG +#ifdef CONFIG_SCTP_DBG_TSNS /* Finish off the last range, displaying its ending TSN. */ switch (dbg_prt_state) { case 0: - if (dbg_last_ack_tsn != dbg_ack_tsn) { - SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn); - } else { - SCTP_DEBUG_PRINTK_CONT("\n"); - } - break; - + if (dbg_last_ack_tsn != dbg_ack_tsn) + pr_cont("-%08x\n", dbg_last_ack_tsn); + else + pr_cont("\n"); + break; case 1: - if (dbg_last_kept_tsn != dbg_kept_tsn) { - SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn); - } else { - SCTP_DEBUG_PRINTK_CONT("\n"); - } + if (dbg_last_kept_tsn != dbg_kept_tsn) + pr_cont("-%08x\n", dbg_last_kept_tsn); + else + pr_cont("\n"); + break; } -#endif /* SCTP_DEBUG */ +#endif /* CONFIG_SCTP_DBG_TSNS */ if (transport) { if (bytes_acked) { struct sctp_association *asoc = transport->asoc; @@ -1676,9 +1659,9 @@ static void sctp_check_transmitted(struct sctp_outq *q, !list_empty(&tlist) && (sack_ctsn+2 == q->asoc->next_tsn) && q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) { - SCTP_DEBUG_PRINTK("%s: SACK received for zero " - "window probe: %u\n", - __func__, sack_ctsn); + pr_debug("%s: sack received for zero window " + "probe:%u\n", __func__, sack_ctsn); + q->asoc->overall_error_count = 0; transport->error_count = 0; } @@ -1739,10 +1722,8 @@ static void sctp_mark_missing(struct sctp_outq *q, count_of_newacks, tsn)) { chunk->tsn_missing_report++; - SCTP_DEBUG_PRINTK( - "%s: TSN 0x%x missing counter: %d\n", - __func__, tsn, - chunk->tsn_missing_report); + pr_debug("%s: tsn:0x%x missing counter:%d\n", + __func__, tsn, chunk->tsn_missing_report); } } /* @@ -1762,11 +1743,10 @@ static void sctp_mark_missing(struct sctp_outq *q, if (do_fast_retransmit) sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX); - SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, " - "ssthresh: %d, flight_size: %d, pba: %d\n", - __func__, transport, transport->cwnd, - transport->ssthresh, transport->flight_size, - transport->partial_bytes_acked); + pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, " + "flight_size:%d, pba:%d\n", __func__, transport, + transport->cwnd, transport->ssthresh, + transport->flight_size, 
transport->partial_bytes_acked); } } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 1de49c802d83..4a17494d736c 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -451,8 +451,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl4->fl4_sport = saddr->v4.sin_port; } - SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ", - __func__, &fl4->daddr, &fl4->saddr); + pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr, + &fl4->saddr); rt = ip_route_output_key(sock_net(sk), fl4); if (!IS_ERR(rt)) @@ -513,10 +513,10 @@ out_unlock: out: t->dst = dst; if (dst) - SCTP_DEBUG_PRINTK("rt_dst:%pI4, rt_src:%pI4\n", - &fl4->daddr, &fl4->saddr); + pr_debug("rt_dst:%pI4, rt_src:%pI4\n", + &fl4->daddr, &fl4->saddr); else - SCTP_DEBUG_PRINTK("NO ROUTE\n"); + pr_debug("no route\n"); } /* For v4, the source address is cached in the route entry(dst). So no need @@ -604,9 +604,9 @@ static void sctp_addr_wq_timeout_handler(unsigned long arg) spin_lock_bh(&net->sctp.addr_wq_lock); list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) { - SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ", - " for cmd %d at entry %p\n", &net->sctp.addr_waitq, &addrw->a, addrw->state, - addrw); + pr_debug("%s: the first ent in wq:%p is addr:%pISc for cmd:%d at " + "entry:%p\n", __func__, &net->sctp.addr_waitq, &addrw->a.sa, + addrw->state, addrw); #if IS_ENABLED(CONFIG_IPV6) /* Now we send an ASCONF for each association */ @@ -623,8 +623,10 @@ static void sctp_addr_wq_timeout_handler(unsigned long arg) addrw->state == SCTP_ADDR_NEW) { unsigned long timeo_val; - SCTP_DEBUG_PRINTK("sctp_timo_handler: this is on DAD, trying %d sec later\n", - SCTP_ADDRESS_TICK_DELAY); + pr_debug("%s: this is on DAD, trying %d sec " + "later\n", __func__, + SCTP_ADDRESS_TICK_DELAY); + timeo_val = jiffies; timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY); mod_timer(&net->sctp.addr_wq_timer, timeo_val); @@ -641,7 +643,7 @@ static void sctp_addr_wq_timeout_handler(unsigned long arg) continue; sctp_bh_lock_sock(sk); if (sctp_asconf_mgmt(sp, addrw) < 0) - SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n"); + pr_debug("%s: sctp_asconf_mgmt failed\n", __func__); sctp_bh_unlock_sock(sk); } #if IS_ENABLED(CONFIG_IPV6) @@ -707,9 +709,10 @@ void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cm addrw = sctp_addr_wq_lookup(net, addr); if (addrw) { if (addrw->state != cmd) { - SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ", - " in wq %p\n", addrw->state, &addrw->a, - &net->sctp.addr_waitq); + pr_debug("%s: offsets existing entry for %d, addr:%pISc " + "in wq:%p\n", __func__, addrw->state, &addrw->a.sa, + &net->sctp.addr_waitq); + list_del(&addrw->list); kfree(addrw); } @@ -725,8 +728,9 @@ void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cm } addrw->state = cmd; list_add_tail(&addrw->list, &net->sctp.addr_waitq); - SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ", - " in wq %p\n", addrw->state, &addrw->a, &net->sctp.addr_waitq); + + pr_debug("%s: add new entry for cmd:%d, addr:%pISc in wq:%p\n", + __func__, addrw->state, &addrw->a.sa, &net->sctp.addr_waitq); if (!timer_pending(&net->sctp.addr_wq_timer)) { timeo_val = jiffies; @@ -952,15 +956,14 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, { struct inet_sock *inet = inet_sk(skb->sk); - SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", - __func__, skb, skb->len, - 
&transport->fl.u.ip4.saddr, - &transport->fl.u.ip4.daddr); + pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb, + skb->len, &transport->fl.u.ip4.saddr, &transport->fl.u.ip4.daddr); inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT; SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); + return ip_queue_xmit(skb, &transport->fl); } @@ -1321,9 +1324,8 @@ static __init int sctp_init(void) int max_share; int order; - /* SCTP_DEBUG sanity check. */ - if (!sctp_sanity_check()) - goto out; + BUILD_BUG_ON(sizeof(struct sctp_ulpevent) > + sizeof(((struct sk_buff *) 0)->cb)); /* Allocate bind_bucket and chunk caches. */ status = -ENOBUFS; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index dd71f1f9ba10..362ae6e2fd93 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -741,7 +741,8 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc) memset(gabs, 0, sizeof(gabs)); ctsn = sctp_tsnmap_get_ctsn(map); - SCTP_DEBUG_PRINTK("sackCTSNAck sent: 0x%x.\n", ctsn); + + pr_debug("%s: sackCTSNAck sent:0x%x\n", __func__, ctsn); /* How much room is needed in the chunk? */ num_gabs = sctp_tsnmap_num_gabs(map, gabs); @@ -1287,10 +1288,8 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, if (!retval) goto nodata; - - if (!sk) { - SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb); - } + if (!sk) + pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb); INIT_LIST_HEAD(&retval->list); retval->skb = skb; @@ -2191,8 +2190,9 @@ static sctp_ierror_t sctp_verify_param(struct net *net, break; fallthrough: default: - SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n", - ntohs(param.p->type), cid); + pr_debug("%s: unrecognized param:%d for chunk:%d\n", + __func__, ntohs(param.p->type), cid); + retval = sctp_process_unk_param(asoc, param, chunk, err_chunk); break; } @@ -2516,7 +2516,7 @@ do_addr_param: break; case SCTP_PARAM_HOST_NAME_ADDRESS: - SCTP_DEBUG_PRINTK("unimplemented SCTP_HOST_NAME_ADDRESS\n"); + pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__); break; case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: @@ -2662,8 +2662,8 @@ fall_through: * called prior to this routine. Simply log the error * here. */ - SCTP_DEBUG_PRINTK("Ignoring param: %d for association %p.\n", - ntohs(param.p->type), asoc); + pr_debug("%s: ignoring param:%d for association:%p.\n", + __func__, ntohs(param.p->type), asoc); break; } @@ -2805,7 +2805,10 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, totallen += paramlen; totallen += addr_param_len; del_pickup = 1; - SCTP_DEBUG_PRINTK("mkasconf_update_ip: picked same-scope del_pending addr, totallen for all addresses is %d\n", totallen); + + pr_debug("%s: picked same-scope del_pending addr, " + "totallen for all addresses is %d\n", + __func__, totallen); } } diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index ff91f47b0239..cf6f84518222 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -257,7 +257,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer) sctp_bh_lock_sock(asoc->base.sk); if (sock_owned_by_user(asoc->base.sk)) { - SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); + pr_debug("%s: sock is busy\n", __func__); /* Try again later. 
*/ if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) @@ -297,9 +297,8 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc, sctp_bh_lock_sock(asoc->base.sk); if (sock_owned_by_user(asoc->base.sk)) { - SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n", - __func__, - timeout_type); + pr_debug("%s: sock is busy: timer %d\n", __func__, + timeout_type); /* Try again later. */ if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20))) @@ -377,7 +376,7 @@ void sctp_generate_heartbeat_event(unsigned long data) sctp_bh_lock_sock(asoc->base.sk); if (sock_owned_by_user(asoc->base.sk)) { - SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); + pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) @@ -415,7 +414,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) sctp_bh_lock_sock(asoc->base.sk); if (sock_owned_by_user(asoc->base.sk)) { - SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __func__); + pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ if (!mod_timer(&transport->proto_unreach_timer, @@ -521,11 +520,9 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, if (transport->state != SCTP_INACTIVE && (transport->error_count > transport->pathmaxrxt)) { - SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p", - " transport IP: port:%d failed.\n", - asoc, - (&transport->ipaddr), - ntohs(transport->ipaddr.v4.sin_port)); + pr_debug("%s: association:%p transport addr:%pISpc failed\n", + __func__, asoc, &transport->ipaddr.sa); + sctp_assoc_control_transport(asoc, transport, SCTP_TRANSPORT_DOWN, SCTP_FAILED_THRESHOLD); @@ -804,8 +801,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, asoc->state = state; - SCTP_DEBUG_PRINTK("sctp_cmd_new_state: asoc %p[%s]\n", - asoc, sctp_state_tbl[state]); + pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]); if (sctp_style(sk, TCP)) { /* Change the sk->sk_state of a TCP-style socket that has @@ -1017,15 +1013,11 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc, asoc->timeouts[timer] = asoc->max_init_timeo; } asoc->init_cycle++; - SCTP_DEBUG_PRINTK( - "T1 %s Timeout adjustment" - " init_err_counter: %d" - " cycle: %d" - " timeout: %ld\n", - name, - asoc->init_err_counter, - asoc->init_cycle, - asoc->timeouts[timer]); + + pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d" + " cycle:%d timeout:%ld\n", __func__, name, + asoc->init_err_counter, asoc->init_cycle, + asoc->timeouts[timer]); } } @@ -1080,23 +1072,19 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc) * main flow of sctp_do_sm() to keep attention focused on the real * functionality there. 
*/ -#define DEBUG_PRE \ - SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \ - "ep %p, %s, %s, asoc %p[%s], %s\n", \ - ep, sctp_evttype_tbl[event_type], \ - (*debug_fn)(subtype), asoc, \ - sctp_state_tbl[state], state_fn->name) - -#define DEBUG_POST \ - SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \ - "asoc %p, status: %s\n", \ - asoc, sctp_status_tbl[status]) - -#define DEBUG_POST_SFX \ - SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \ - error, asoc, \ - sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \ - sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED]) +#define debug_pre_sfn() \ + pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \ + ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype), \ + asoc, sctp_state_tbl[state], state_fn->name) + +#define debug_post_sfn() \ + pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \ + sctp_status_tbl[status]) + +#define debug_post_sfx() \ + pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \ + asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \ + sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED]) /* * This is the master state machine processing function. @@ -1116,7 +1104,6 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, sctp_disposition_t status; int error = 0; typedef const char *(printfn_t)(sctp_subtype_t); - static printfn_t *table[] = { NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, }; @@ -1129,21 +1116,18 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, sctp_init_cmd_seq(&commands); - DEBUG_PRE; + debug_pre_sfn(); status = (*state_fn->fn)(net, ep, asoc, subtype, event_arg, &commands); - DEBUG_POST; + debug_post_sfn(); error = sctp_side_effects(event_type, subtype, state, ep, asoc, event_arg, status, &commands, gfp); - DEBUG_POST_SFX; + debug_post_sfx(); return error; } -#undef DEBUG_PRE -#undef DEBUG_POST - /***************************************************************** * This the master state function side effect processing function. *****************************************************************/ @@ -1172,9 +1156,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, switch (status) { case SCTP_DISPOSITION_DISCARD: - SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, " - "event_type %d, event_id %d\n", - state, event_type, subtype.chunk); + pr_debug("%s: ignored sctp protocol event - state:%d, " + "event_type:%d, event_id:%d\n", __func__, state, + event_type, subtype.chunk); break; case SCTP_DISPOSITION_NOMEM: @@ -1425,18 +1409,18 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, case SCTP_CMD_CHUNK_ULP: /* Send a chunk to the sockets layer. */ - SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", - "chunk_up:", cmd->obj.chunk, - "ulpq:", &asoc->ulpq); + pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n", + __func__, cmd->obj.chunk, &asoc->ulpq); + sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk, GFP_ATOMIC); break; case SCTP_CMD_EVENT_ULP: /* Send a notification to the sockets layer. 
*/ - SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", - "event_up:",cmd->obj.ulpevent, - "ulpq:",&asoc->ulpq); + pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n", + __func__, cmd->obj.ulpevent, &asoc->ulpq); + sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent); break; @@ -1601,7 +1585,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, break; case SCTP_CMD_REPORT_BAD_TAG: - SCTP_DEBUG_PRINTK("vtag mismatch!\n"); + pr_debug("%s: vtag mismatch!\n", __func__); break; case SCTP_CMD_STRIKE: diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index b3d186856513..f6b7109195a6 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1179,9 +1179,9 @@ sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net, /* Check if the timestamp looks valid. */ if (time_after(hbinfo->sent_at, jiffies) || time_after(jiffies, hbinfo->sent_at + max_interval)) { - SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp " - "received for transport: %p\n", - __func__, link); + pr_debug("%s: HEARTBEAT ACK with invalid timestamp received " + "for transport:%p\n", __func__, link); + return SCTP_DISPOSITION_DISCARD; } @@ -2562,7 +2562,8 @@ static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, const struct sctp_association *asoc, struct sctp_transport *transport) { - SCTP_DEBUG_PRINTK("ABORT received (INIT).\n"); + pr_debug("%s: ABORT received (INIT)\n", __func__); + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); @@ -2572,6 +2573,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, /* CMD_INIT_FAILED will DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_PERR(error)); + return SCTP_DISPOSITION_ABORT; } @@ -2637,8 +2639,9 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net, ctsn = ntohl(sdh->cum_tsn_ack); if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { - SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn); - SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point); + pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn, + asoc->ctsn_ack_point); + return SCTP_DISPOSITION_DISCARD; } @@ -2721,8 +2724,9 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, ctsn = ntohl(sdh->cum_tsn_ack); if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { - SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn); - SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point); + pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn, + asoc->ctsn_ack_point); + return SCTP_DISPOSITION_DISCARD; } @@ -3174,8 +3178,9 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, * Point indicates an out-of-order SACK. */ if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { - SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn); - SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point); + pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn, + asoc->ctsn_ack_point); + return SCTP_DISPOSITION_DISCARD; } @@ -3859,7 +3864,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, skb_pull(chunk->skb, len); tsn = ntohl(fwdtsn_hdr->new_cum_tsn); - SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn); + pr_debug("%s: TSN 0x%x\n", __func__, tsn); /* The TSN is too high--silently discard the chunk and count on it * getting retransmitted later. 
@@ -3927,7 +3932,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( skb_pull(chunk->skb, len); tsn = ntohl(fwdtsn_hdr->new_cum_tsn); - SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn); + pr_debug("%s: TSN 0x%x\n", __func__, tsn); /* The TSN is too high--silently discard the chunk and count on it * getting retransmitted later. @@ -4166,7 +4171,7 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net, struct sctp_chunk *err_chunk; sctp_chunkhdr_t *hdr; - SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk); + pr_debug("%s: processing unknown chunk id:%d\n", __func__, type.chunk); if (!sctp_vtag_verify(unk_chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); @@ -4256,7 +4261,8 @@ sctp_disposition_t sctp_sf_discard_chunk(struct net *net, return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); - SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); + pr_debug("%s: chunk:%d is discarded\n", __func__, type.chunk); + return SCTP_DISPOSITION_DISCARD; } @@ -5184,7 +5190,9 @@ sctp_disposition_t sctp_sf_ignore_primitive( void *arg, sctp_cmd_seq_t *commands) { - SCTP_DEBUG_PRINTK("Primitive type %d is ignored.\n", type.primitive); + pr_debug("%s: primitive type:%d is ignored\n", __func__, + type.primitive); + return SCTP_DISPOSITION_DISCARD; } @@ -5379,7 +5387,9 @@ sctp_disposition_t sctp_sf_ignore_other(struct net *net, void *arg, sctp_cmd_seq_t *commands) { - SCTP_DEBUG_PRINTK("The event other type %d is ignored\n", type.other); + pr_debug("%s: the event other type:%d is ignored\n", + __func__, type.other); + return SCTP_DISPOSITION_DISCARD; } @@ -5527,7 +5537,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net, struct sctp_bind_addr *bp; int attempts = asoc->init_err_counter + 1; - SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n"); + pr_debug("%s: timer T1 expired (INIT)\n", __func__); + SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS); if (attempts <= asoc->max_init_attempts) { @@ -5546,9 +5557,10 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net, sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); } else { - SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d" - " max_init_attempts: %d\n", - attempts, asoc->max_init_attempts); + pr_debug("%s: giving up on INIT, attempts:%d " + "max_init_attempts:%d\n", __func__, attempts, + asoc->max_init_attempts); + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, @@ -5588,7 +5600,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net, struct sctp_chunk *repl = NULL; int attempts = asoc->init_err_counter + 1; - SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n"); + pr_debug("%s: timer T1 expired (COOKIE-ECHO)\n", __func__); + SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS); if (attempts <= asoc->max_init_attempts) { @@ -5636,7 +5649,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net, { struct sctp_chunk *reply = NULL; - SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); + pr_debug("%s: timer T2 expired\n", __func__); + SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS); ((struct sctp_association *)asoc)->shutdown_retries++; @@ -5777,7 +5791,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net, { struct sctp_chunk *reply = NULL; - SCTP_DEBUG_PRINTK("Timer T5 expired.\n"); + pr_debug("%s: timer T5 expired\n", __func__); + SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS); reply = sctp_make_abort(asoc, NULL, 0); @@ -5892,7 +5907,8 @@ sctp_disposition_t 
sctp_sf_timer_ignore(struct net *net, void *arg, sctp_cmd_seq_t *commands) { - SCTP_DEBUG_PRINTK("Timer %d ignored.\n", type.chunk); + pr_debug("%s: timer %d ignored\n", __func__, type.chunk); + return SCTP_DISPOSITION_CONSUME; } @@ -6102,7 +6118,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); tsn = ntohl(data_hdr->tsn); - SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn); + pr_debug("%s: TSN 0x%x\n", __func__, tsn); /* ASSERT: Now skb->data is really the user data. */ @@ -6179,12 +6195,12 @@ static int sctp_eat_data(const struct sctp_association *asoc, */ if (sctp_tsnmap_has_gap(map) && (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { - SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn); + pr_debug("%s: reneging for tsn:%u\n", __func__, tsn); deliver = SCTP_CMD_RENEGE; } else { - SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, " - "rwnd: %d\n", tsn, datalen, - asoc->rwnd); + pr_debug("%s: discard tsn:%u len:%zu, rwnd:%d\n", + __func__, tsn, datalen, asoc->rwnd); + return SCTP_IERROR_IGNORE_TSN; } } @@ -6199,7 +6215,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, if (*sk->sk_prot_creator->memory_pressure) { if (sctp_tsnmap_has_gap(map) && (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { - SCTP_DEBUG_PRINTK("Under Pressure! Reneging for tsn:%u\n", tsn); + pr_debug("%s: under pressure, reneging for tsn:%u\n", + __func__, tsn); deliver = SCTP_CMD_RENEGE; } } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 66fcdcfe1b74..d5c6a2870473 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -281,8 +281,8 @@ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) sctp_lock_sock(sk); - SCTP_DEBUG_PRINTK("sctp_bind(sk: %p, addr: %p, addr_len: %d)\n", - sk, addr, addr_len); + pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, + addr, addr_len); /* Disallow binding twice. */ if (!sctp_sk(sk)->ep->base.bind_addr.port) @@ -342,19 +342,15 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) /* Common sockaddr verification. */ af = sctp_sockaddr_af(sp, addr, len); if (!af) { - SCTP_DEBUG_PRINTK("sctp_do_bind(sk: %p, newaddr: %p, len: %d) EINVAL\n", - sk, addr, len); + pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", + __func__, sk, addr, len); return -EINVAL; } snum = ntohs(addr->v4.sin_port); - SCTP_DEBUG_PRINTK_IPADDR("sctp_do_bind(sk: %p, new addr: ", - ", port: %d, new port: %d, len: %d)\n", - sk, - addr, - bp->port, snum, - len); + pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", + __func__, sk, &addr->sa, bp->port, snum, len); /* PF specific bind() address verification. 
*/ if (!sp->pf->bind_verify(sp, addr)) @@ -368,9 +364,8 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) if (!snum) snum = bp->port; else if (snum != bp->port) { - SCTP_DEBUG_PRINTK("sctp_do_bind:" - " New port %d does not match existing port " - "%d.\n", snum, bp->port); + pr_debug("%s: new port %d doesn't match existing port " + "%d\n", __func__, snum, bp->port); return -EINVAL; } } @@ -468,8 +463,8 @@ static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) struct sockaddr *sa_addr; struct sctp_af *af; - SCTP_DEBUG_PRINTK("sctp_bindx_add (sk: %p, addrs: %p, addrcnt: %d)\n", - sk, addrs, addrcnt); + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, + addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { @@ -535,11 +530,10 @@ static int sctp_send_asconf_add_ip(struct sock *sk, sp = sctp_sk(sk); ep = sp->ep; - SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", - __func__, sk, addrs, addrcnt); + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", + __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { - if (!asoc->peer.asconf_capable) continue; @@ -646,8 +640,8 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) union sctp_addr *sa_addr; struct sctp_af *af; - SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n", - sk, addrs, addrcnt); + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", + __func__, sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { @@ -740,8 +734,8 @@ static int sctp_send_asconf_del_ip(struct sock *sk, sp = sctp_sk(sk); ep = sp->ep; - SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", - __func__, sk, addrs, addrcnt); + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", + __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { @@ -808,9 +802,11 @@ static int sctp_send_asconf_del_ip(struct sock *sk, sin6 = (struct sockaddr_in6 *)addrs; asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; } - SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ", - " at %p\n", asoc, asoc->asconf_addr_del_pending, - asoc->asconf_addr_del_pending); + + pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", + __func__, asoc, &asoc->asconf_addr_del_pending->sa, + asoc->asconf_addr_del_pending); + asoc->src_out_of_asoc_ok = 1; stored = 1; goto skip_mkasconf; @@ -972,8 +968,8 @@ static int sctp_setsockopt_bindx(struct sock* sk, void *addr_buf; struct sctp_af *af; - SCTP_DEBUG_PRINTK("sctp_setsockopt_bindx: sk %p addrs %p" - " addrs_size %d opt %d\n", sk, addrs, addrs_size, op); + pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", + __func__, sk, addrs, addrs_size, op); if (unlikely(addrs_size <= 0)) return -EINVAL; @@ -1231,10 +1227,9 @@ static int __sctp_connect(struct sock* sk, asoc = NULL; out_free: + pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n", + __func__, asoc, kaddrs, err); - SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p" - " kaddrs: %p err: %d\n", - asoc, kaddrs, err); if (asoc) { /* sctp_primitive_ASSOCIATE may have added this association * To the hash table, try to unhash it, just in case, its a noop @@ -1316,8 +1311,8 @@ static int __sctp_setsockopt_connectx(struct sock* sk, int err = 0; struct sockaddr *kaddrs; - SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n", - __func__, sk, addrs, addrs_size); + pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", + __func__, sk, addrs, addrs_size); if 
(unlikely(addrs_size <= 0)) return -EINVAL; @@ -1468,7 +1463,7 @@ static void sctp_close(struct sock *sk, long timeout) struct list_head *pos, *temp; unsigned int data_was_unread; - SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout); + pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); sctp_lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; @@ -1594,14 +1589,12 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, struct sctp_datamsg *datamsg; int msg_flags = msg->msg_flags; - SCTP_DEBUG_PRINTK("sctp_sendmsg(sk: %p, msg: %p, msg_len: %zu)\n", - sk, msg, msg_len); - err = 0; sp = sctp_sk(sk); ep = sp->ep; - SCTP_DEBUG_PRINTK("Using endpoint: %p.\n", ep); + pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, + msg, msg_len, ep); /* We cannot send a message over a TCP-style listening socket. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { @@ -1611,9 +1604,8 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, /* Parse out the SCTP CMSGs. */ err = sctp_msghdr_parse(msg, &cmsgs); - if (err) { - SCTP_DEBUG_PRINTK("msghdr parse err = %x\n", err); + pr_debug("%s: msghdr parse err:%x\n", __func__, err); goto out_nounlock; } @@ -1645,8 +1637,8 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, associd = sinfo->sinfo_assoc_id; } - SCTP_DEBUG_PRINTK("msg_len: %zu, sinfo_flags: 0x%x\n", - msg_len, sinfo_flags); + pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, + msg_len, sinfo_flags); /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { @@ -1675,7 +1667,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, transport = NULL; - SCTP_DEBUG_PRINTK("About to look up association.\n"); + pr_debug("%s: about to look up association\n", __func__); sctp_lock_sock(sk); @@ -1705,7 +1697,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, } if (asoc) { - SCTP_DEBUG_PRINTK("Just looked up association: %p.\n", asoc); + pr_debug("%s: just looked up association:%p\n", __func__, asoc); /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED * socket that has an association in CLOSED state. This can @@ -1718,8 +1710,9 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, } if (sinfo_flags & SCTP_EOF) { - SCTP_DEBUG_PRINTK("Shutting down association: %p\n", - asoc); + pr_debug("%s: shutting down association:%p\n", + __func__, asoc); + sctp_primitive_SHUTDOWN(net, asoc, NULL); err = 0; goto out_unlock; @@ -1732,7 +1725,9 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, goto out_unlock; } - SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); + pr_debug("%s: aborting association:%p\n", + __func__, asoc); + sctp_primitive_ABORT(net, asoc, chunk); err = 0; goto out_unlock; @@ -1741,7 +1736,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, /* Do we need to create the association? */ if (!asoc) { - SCTP_DEBUG_PRINTK("There is no association yet.\n"); + pr_debug("%s: there is no association yet\n", __func__); if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { err = -EINVAL; @@ -1840,7 +1835,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, } /* ASSERT: we have a valid association at this point. 
*/ - SCTP_DEBUG_PRINTK("We have a valid association.\n"); + pr_debug("%s: we have a valid association\n", __func__); if (!sinfo) { /* If the user didn't specify SNDRCVINFO, make up one with @@ -1909,7 +1904,8 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) goto out_free; - SCTP_DEBUG_PRINTK("We associated primitively.\n"); + + pr_debug("%s: we associated primitively\n", __func__); } /* Break the message into multiple chunks of maximum size. */ @@ -1936,17 +1932,15 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, */ err = sctp_primitive_SEND(net, asoc, datamsg); /* Did the lower layer accept the chunk? */ - if (err) + if (err) { sctp_datamsg_free(datamsg); - else - sctp_datamsg_put(datamsg); + goto out_free; + } - SCTP_DEBUG_PRINTK("We sent primitively.\n"); + pr_debug("%s: we sent primitively\n", __func__); - if (err) - goto out_free; - else - err = msg_len; + sctp_datamsg_put(datamsg); + err = msg_len; /* If we are already past ASSOCIATE, the lower * layers are responsible for association cleanup. @@ -2041,10 +2035,9 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, int err = 0; int skb_len; - SCTP_DEBUG_PRINTK("sctp_recvmsg(%s: %p, %s: %p, %s: %zd, %s: %d, %s: " - "0x%x, %s: %p)\n", "sk", sk, "msghdr", msg, - "len", len, "knoblauch", noblock, - "flags", flags, "addr_len", addr_len); + pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " + "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, + addr_len); sctp_lock_sock(sk); @@ -3086,7 +3079,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva err = sctp_send_asconf(asoc, chunk); - SCTP_DEBUG_PRINTK("We set peer primary addr primitively.\n"); + pr_debug("%s: we set peer primary addr primitively\n", __func__); return err; } @@ -3561,8 +3554,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, { int retval = 0; - SCTP_DEBUG_PRINTK("sctp_setsockopt(sk: %p... optname: %d)\n", - sk, optname); + pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft @@ -3724,8 +3716,8 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr, sctp_lock_sock(sk); - SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d\n", - __func__, sk, addr, addr_len); + pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, + addr, addr_len); /* Validate addr_len before calling common connect/connectx routine. */ af = sctp_get_af_specific(addr->sa_family); @@ -3855,7 +3847,7 @@ static int sctp_init_sock(struct sock *sk) struct net *net = sock_net(sk); struct sctp_sock *sp; - SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk); + pr_debug("%s: sk:%p\n", __func__, sk); sp = sctp_sk(sk); @@ -3990,7 +3982,7 @@ static void sctp_destroy_sock(struct sock *sk) { struct sctp_sock *sp; - SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk); + pr_debug("%s: sk:%p\n", __func__, sk); /* Release our hold on the endpoint. 
*/ sp = sctp_sk(sk); @@ -4123,9 +4115,9 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, goto out; } - SCTP_DEBUG_PRINTK("sctp_getsockopt_sctp_status(%d): %d %d %d\n", - len, status.sstat_state, status.sstat_rwnd, - status.sstat_assoc_id); + pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", + __func__, len, status.sstat_state, status.sstat_rwnd, + status.sstat_assoc_id); if (copy_to_user(optval, &status, len)) { retval = -EFAULT; @@ -4333,8 +4325,8 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval return PTR_ERR(newfile); } - SCTP_DEBUG_PRINTK("%s: sk: %p newsk: %p sd: %d\n", - __func__, sk, newsock->sk, retval); + pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, + retval); /* Return the fd mapped to the new socket. */ if (put_user(len, optlen)) { @@ -4467,7 +4459,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, trans = sctp_addr_id2transport(sk, ¶ms.spp_address, params.spp_assoc_id); if (!trans) { - SCTP_DEBUG_PRINTK("Failed no transport\n"); + pr_debug("%s: failed no transport\n", __func__); return -EINVAL; } } @@ -4478,7 +4470,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, */ asoc = sctp_id2assoc(sk, params.spp_assoc_id); if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { - SCTP_DEBUG_PRINTK("Failed no association\n"); + pr_debug("%s: failed no association\n", __func__); return -EINVAL; } @@ -5698,8 +5690,7 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, if (put_user(len, optlen)) return -EFAULT; - SCTP_DEBUG_PRINTK("sctp_getsockopt_assoc_stat(%d): %d\n", - len, sas.sas_assoc_id); + pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); if (copy_to_user(optval, &sas, len)) return -EFAULT; @@ -5713,8 +5704,7 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, int retval = 0; int len; - SCTP_DEBUG_PRINTK("sctp_getsockopt(sk: %p... optname: %d)\n", - sk, optname); + pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft @@ -5894,7 +5884,8 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) snum = ntohs(addr->v4.sin_port); - SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum); + pr_debug("%s: begins, snum:%d\n", __func__, snum); + sctp_local_bh_disable(); if (snum == 0) { @@ -5960,7 +5951,8 @@ pp_found: int reuse = sk->sk_reuse; struct sock *sk2; - SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); + pr_debug("%s: found a possible match\n", __func__); + if (pp->fastreuse && sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) goto success; @@ -5990,7 +5982,8 @@ pp_found: goto fail_unlock; } } - SCTP_DEBUG_PRINTK("sctp_get_port(): Found a match\n"); + + pr_debug("%s: found a match\n", __func__); } pp_not_found: /* If there was a hash table miss, create a new port. 
*/ @@ -6479,8 +6472,8 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, timeo = sock_rcvtimeo(sk, noblock); - SCTP_DEBUG_PRINTK("Timeout: timeo: %ld, MAX: %ld.\n", - timeo, MAX_SCHEDULE_TIMEOUT); + pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, + MAX_SCHEDULE_TIMEOUT); do { /* Again only user level code calls this function, @@ -6611,8 +6604,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, long current_timeo = *timeo_p; DEFINE_WAIT(wait); - SCTP_DEBUG_PRINTK("wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%zu\n", - asoc, (long)(*timeo_p), msg_len); + pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, + *timeo_p, msg_len); /* Increment the association's refcnt. */ sctp_association_hold(asoc); @@ -6718,8 +6711,7 @@ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) long current_timeo = *timeo_p; DEFINE_WAIT(wait); - SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __func__, asoc, - (long)(*timeo_p)); + pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); /* Increment the association's refcnt. */ sctp_association_hold(asoc); diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 5d3c71bbd197..bdbbc3fd7c14 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -176,7 +176,10 @@ static void sctp_transport_destroy_rcu(struct rcu_head *head) */ static void sctp_transport_destroy(struct sctp_transport *transport) { - SCTP_ASSERT(transport->dead, "Transport is not dead", return); + if (unlikely(!transport->dead)) { + WARN(1, "Attempt to destroy undead transport %p!\n", transport); + return; + } call_rcu(&transport->rcu, sctp_transport_destroy_rcu); @@ -317,11 +320,9 @@ void sctp_transport_put(struct sctp_transport *transport) /* Update transport's RTO based on the newly calculated RTT. */ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) { - /* Check for valid transport. */ - SCTP_ASSERT(tp, "NULL transport", return); - - /* We should not be doing any RTO updates unless rto_pending is set. */ - SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return); + if (unlikely(!tp->rto_pending)) + /* We should not be doing any RTO updates unless rto_pending is set. 
*/ + pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp); if (tp->rttvar || tp->srtt) { struct net *net = sock_net(tp->asoc->base.sk); @@ -377,9 +378,8 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) */ tp->rto_pending = 0; - SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " - "rttvar: %d, rto: %ld\n", __func__, - tp, rtt, tp->srtt, tp->rttvar, tp->rto); + pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n", + __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto); } /* This routine updates the transport's cwnd and partial_bytes_acked @@ -433,12 +433,11 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport, cwnd += pmtu; else cwnd += bytes_acked; - SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, " - "bytes_acked: %d, cwnd: %d, ssthresh: %d, " - "flight_size: %d, pba: %d\n", - __func__, - transport, bytes_acked, cwnd, - ssthresh, flight_size, pba); + + pr_debug("%s: slow start: transport:%p, bytes_acked:%d, " + "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n", + __func__, transport, bytes_acked, cwnd, ssthresh, + flight_size, pba); } else { /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, * upon each SACK arrival that advances the Cumulative TSN Ack @@ -459,12 +458,12 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport, cwnd += pmtu; pba = ((cwnd < pba) ? (pba - cwnd) : 0); } - SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: " - "transport: %p, bytes_acked: %d, cwnd: %d, " - "ssthresh: %d, flight_size: %d, pba: %d\n", - __func__, - transport, bytes_acked, cwnd, - ssthresh, flight_size, pba); + + pr_debug("%s: congestion avoidance: transport:%p, " + "bytes_acked:%d, cwnd:%d, ssthresh:%d, " + "flight_size:%d, pba:%d\n", __func__, + transport, bytes_acked, cwnd, ssthresh, + flight_size, pba); } transport->cwnd = cwnd; @@ -558,10 +557,10 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, } transport->partial_bytes_acked = 0; - SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " - "%d ssthresh: %d\n", __func__, - transport, reason, - transport->cwnd, transport->ssthresh); + + pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n", + __func__, transport, reason, transport->cwnd, + transport->ssthresh); } /* Apply Max.Burst limit to the congestion window: -- cgit v1.2.3 From 8965779d2c0e6ab246c82a405236b1fb2adae6b2 Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Sat, 29 Jun 2013 21:30:49 +0800 Subject: ipv6,mcast: always hold idev->lock before mca_lock dingtianhong reported the following deadlock detected by lockdep: ====================================================== [ INFO: possible circular locking dependency detected ] 3.4.24.05-0.1-default #1 Not tainted ------------------------------------------------------- ksoftirqd/0/3 is trying to acquire lock: (&ndev->lock){+.+...}, at: [] ipv6_get_lladdr+0x74/0x120 but task is already holding lock: (&mc->mca_lock){+.+...}, at: [] mld_send_report+0x40/0x150 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (&mc->mca_lock){+.+...}: [] validate_chain+0x637/0x730 [] __lock_acquire+0x2f7/0x500 [] lock_acquire+0x114/0x150 [] rt_spin_lock+0x4a/0x60 [] igmp6_group_added+0x3b/0x120 [] ipv6_mc_up+0x38/0x60 [] ipv6_find_idev+0x3d/0x80 [] addrconf_notify+0x3d5/0x4b0 [] notifier_call_chain+0x3f/0x80 [] raw_notifier_call_chain+0x11/0x20 [] call_netdevice_notifiers+0x32/0x60 [] __dev_notify_flags+0x34/0x80 [] dev_change_flags+0x40/0x70 [] do_setlink+0x237/0x8a0 [] rtnl_newlink+0x3ec/0x600 [] rtnetlink_rcv_msg+0x160/0x310 [] netlink_rcv_skb+0x89/0xb0 [] rtnetlink_rcv+0x27/0x40 [] netlink_unicast+0x140/0x180 [] netlink_sendmsg+0x33e/0x380 [] sock_sendmsg+0x112/0x130 [] __sys_sendmsg+0x44e/0x460 [] sys_sendmsg+0x44/0x70 [] system_call_fastpath+0x16/0x1b -> #0 (&ndev->lock){+.+...}: [] check_prev_add+0x3de/0x440 [] validate_chain+0x637/0x730 [] __lock_acquire+0x2f7/0x500 [] lock_acquire+0x114/0x150 [] rt_read_lock+0x42/0x60 [] ipv6_get_lladdr+0x74/0x120 [] mld_newpack+0xb6/0x160 [] add_grhead+0xab/0xc0 [] add_grec+0x3ab/0x460 [] mld_send_report+0x5a/0x150 [] igmp6_timer_handler+0x4e/0xb0 [] call_timer_fn+0xca/0x1d0 [] run_timer_softirq+0x1df/0x2e0 [] handle_pending_softirqs+0xf7/0x1f0 [] __do_softirq_common+0x7b/0xf0 [] __thread_do_softirq+0x1af/0x210 [] run_ksoftirqd+0xe1/0x1f0 [] kthread+0xae/0xc0 [] kernel_thread_helper+0x4/0x10 actually we can just hold idev->lock before taking pmc->mca_lock, and avoid taking idev->lock again when iterating idev->addr_list, since the upper callers of mld_newpack() already take read_lock_bh(&idev->lock). Reported-by: dingtianhong Cc: dingtianhong Cc: Hideaki YOSHIFUJI Cc: David S. Miller Cc: Hannes Frederic Sowa Tested-by: Ding Tianhong Tested-by: Chen Weilong Signed-off-by: Cong Wang Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- include/net/addrconf.h | 3 +++ net/ipv6/addrconf.c | 4 ++-- net/ipv6/mcast.c | 18 ++++++++++-------- 3 files changed, 15 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/net/addrconf.h b/include/net/addrconf.h index f68eaf574d7e..c7b181cb47a6 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -86,6 +86,9 @@ extern int ipv6_dev_get_saddr(struct net *net, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); +extern int __ipv6_get_lladdr(struct inet6_dev *idev, + struct in6_addr *addr, + unsigned char banned_flags); extern int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, unsigned char banned_flags); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 12dd2fec045c..75fd93bdd0d3 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1444,8 +1444,8 @@ try_nextdev: } EXPORT_SYMBOL(ipv6_dev_get_saddr); -static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, - unsigned char banned_flags) +int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, + unsigned char banned_flags) { struct inet6_ifaddr *ifp; int err = -EADDRNOTAVAIL; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 502c877cbf10..99cd65c715cd 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1351,8 +1351,9 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb, hdr->daddr = *daddr; } -static struct sk_buff *mld_newpack(struct net_device *dev, int size) +static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) { + struct net_device *dev = idev->dev; struct net *net = dev_net(dev); struct sock *sk = net->ipv6.igmp_sk; struct sk_buff *skb; @@ -1377,7 +1378,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) skb_reserve(skb, hlen); - if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { + if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { /* : * use unspecified address as the source address * when a valid link-local address is not available. 
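The same ordering change shows up in the mld_send_report() hunk further below: read_lock_bh(&idev->lock) is taken once, before any per-group mca_lock, and released only after the loop, so mld_newpack() can walk idev->addr_list through __ipv6_get_lladdr() without re-acquiring idev->lock. A condensed sketch of the resulting nesting (simplified from the patched function, not the verbatim upstream code):

	/*
	 * Sketch of the corrected nesting in mld_send_report():
	 * idev->lock (read) is the outer lock, each pmc->mca_lock the
	 * inner one, matching the established idev->lock -> mca_lock
	 * order seen in the igmp6_group_added() chain of the splat.
	 */
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		if (pmc->mca_flags & MAF_NOREPORT)
			continue;

		spin_lock_bh(&pmc->mca_lock);
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_MODE_IS_EXCLUDE;
		else
			type = MLD2_MODE_IS_INCLUDE;
		/* may call mld_newpack(idev, ...) while idev->lock is held */
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->mca_lock);
	}
	read_unlock_bh(&idev->lock);

	if (skb)
		mld_sendpack(skb);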
@@ -1474,7 +1475,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, struct mld2_grec *pgr; if (!skb) - skb = mld_newpack(dev, dev->mtu); + skb = mld_newpack(pmc->idev, dev->mtu); if (!skb) return NULL; pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec)); @@ -1494,7 +1495,8 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) { - struct net_device *dev = pmc->idev->dev; + struct inet6_dev *idev = pmc->idev; + struct net_device *dev = idev->dev; struct mld2_report *pmr; struct mld2_grec *pgr = NULL; struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; @@ -1523,7 +1525,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { if (skb) mld_sendpack(skb); - skb = mld_newpack(dev, dev->mtu); + skb = mld_newpack(idev, dev->mtu); } } first = 1; @@ -1550,7 +1552,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, pgr->grec_nsrcs = htons(scount); if (skb) mld_sendpack(skb); - skb = mld_newpack(dev, dev->mtu); + skb = mld_newpack(idev, dev->mtu); first = 1; scount = 0; } @@ -1605,8 +1607,8 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) struct sk_buff *skb = NULL; int type; + read_lock_bh(&idev->lock); if (!pmc) { - read_lock_bh(&idev->lock); for (pmc=idev->mc_list; pmc; pmc=pmc->next) { if (pmc->mca_flags & MAF_NOREPORT) continue; @@ -1618,7 +1620,6 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->mca_lock); } - read_unlock_bh(&idev->lock); } else { spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) @@ -1628,6 +1629,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->mca_lock); } + read_unlock_bh(&idev->lock); if (skb) mld_sendpack(skb); } -- cgit v1.2.3 From a05b1019cfe5250baf24e26e2e7112cf66054114 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 1 Jul 2013 18:10:36 +0200 Subject: net: sctp: prevent checksum.h from double inclusion The header file checksum.h is missing the proper defines to prevent double inclusion. Signed-off-by: Daniel Borkmann Acked-by: Neil Horman Signed-off-by: David S. Miller --- include/net/sctp/checksum.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h index 5a2110d3176d..0cb08e6fb6df 100644 --- a/include/net/sctp/checksum.h +++ b/include/net/sctp/checksum.h @@ -42,6 +42,9 @@ * be incorporated into the next SCTP release. */ +#ifndef __sctp_checksum_h__ +#define __sctp_checksum_h__ + #include #include #include @@ -81,3 +84,5 @@ static inline __le32 sctp_end_cksum(__u32 crc32) { return cpu_to_le32(~crc32); } + +#endif /* __sctp_checksum_h__ */ -- cgit v1.2.3 From b48410b4dc9c7306df21e508cc6739a55c350eb8 Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Mon, 1 Jul 2013 22:19:45 +0300 Subject: ipv4: remove fib_update_nh_saddrs() declaration. This patch removes the fib_update_nh_saddrs() declaration from include/net/ip_fib.h, as the fib_update_nh_saddrs() method was removed in commit 436c3b6 ("ipv4: Invalidate nexthop cache nh_saddr more correctly"). Signed-off-by: Rami Rosen Signed-off-by: David S. 
Miller --- include/net/ip_fib.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index aac85534d7b5..cbf2be37c91a 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -291,7 +291,6 @@ static inline int fib_num_tclassid_users(struct net *net) extern int ip_fib_check_default(__be32 gw, struct net_device *dev); extern int fib_sync_down_dev(struct net_device *dev, int force); extern int fib_sync_down_addr(struct net *net, __be32 local); -extern void fib_update_nh_saddrs(struct net_device *dev); extern int fib_sync_up(struct net_device *dev); extern void fib_select_multipath(struct fib_result *res); -- cgit v1.2.3 From 8822b64a0fa64a5dd1dfcf837c5b0be83f8c05d1 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 1 Jul 2013 20:21:30 +0200 Subject: ipv6: call udp_push_pending_frames when uncorking a socket with AF_INET pending data We accidentally call down to ip6_push_pending_frames when uncorking pending AF_INET data on a ipv6 socket. This results in the following splat (from Dave Jones): skbuff: skb_under_panic: text:ffffffff816765f6 len:48 put:40 head:ffff88013deb6df0 data:ffff88013deb6dec tail:0x2c end:0xc0 dev: ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:126! invalid opcode: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC Modules linked in: dccp_ipv4 dccp 8021q garp bridge stp dlci mpoa snd_seq_dummy sctp fuse hidp tun bnep nfnetlink scsi_transport_iscsi rfcomm can_raw can_bcm af_802154 appletalk caif_socket can caif ipt_ULOG x25 rose af_key pppoe pppox ipx phonet irda llc2 ppp_generic slhc p8023 psnap p8022 llc crc_ccitt atm bluetooth +netrom ax25 nfc rfkill rds af_rxrpc coretemp hwmon kvm_intel kvm crc32c_intel snd_hda_codec_realtek ghash_clmulni_intel microcode pcspkr snd_hda_codec_hdmi snd_hda_intel snd_hda_codec snd_hwdep usb_debug snd_seq snd_seq_device snd_pcm e1000e snd_page_alloc snd_timer ptp snd pps_core soundcore xfs libcrc32c CPU: 2 PID: 8095 Comm: trinity-child2 Not tainted 3.10.0-rc7+ #37 task: ffff8801f52c2520 ti: ffff8801e6430000 task.ti: ffff8801e6430000 RIP: 0010:[] [] skb_panic+0x63/0x65 RSP: 0018:ffff8801e6431de8 EFLAGS: 00010282 RAX: 0000000000000086 RBX: ffff8802353d3cc0 RCX: 0000000000000006 RDX: 0000000000003b90 RSI: ffff8801f52c2ca0 RDI: ffff8801f52c2520 RBP: ffff8801e6431e08 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000001 R11: 0000000000000001 R12: ffff88022ea0c800 R13: ffff88022ea0cdf8 R14: ffff8802353ecb40 R15: ffffffff81cc7800 FS: 00007f5720a10740(0000) GS:ffff880244c00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000005862000 CR3: 000000022843c000 CR4: 00000000001407e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000600 Stack: ffff88013deb6dec 000000000000002c 00000000000000c0 ffffffff81a3f6e4 ffff8801e6431e18 ffffffff8159a9aa ffff8801e6431e90 ffffffff816765f6 ffffffff810b756b 0000000700000002 ffff8801e6431e40 0000fea9292aa8c0 Call Trace: [] skb_push+0x3a/0x40 [] ip6_push_pending_frames+0x1f6/0x4d0 [] ? mark_held_locks+0xbb/0x140 [] udp_v6_push_pending_frames+0x2b9/0x3d0 [] ? udplite_getfrag+0x20/0x20 [] udp_lib_setsockopt+0x1aa/0x1f0 [] ? 
fget_light+0x387/0x4f0 [] udpv6_setsockopt+0x34/0x40 [] sock_common_setsockopt+0x14/0x20 [] SyS_setsockopt+0x71/0xd0 [] tracesys+0xdd/0xe2 Code: 00 00 48 89 44 24 10 8b 87 d8 00 00 00 48 89 44 24 08 48 8b 87 e8 00 00 00 48 c7 c7 c0 04 aa 81 48 89 04 24 31 c0 e8 e1 7e ff ff <0f> 0b 55 48 89 e5 0f 0b 55 48 89 e5 0f 0b 55 48 89 e5 0f 0b 55 RIP [] skb_panic+0x63/0x65 RSP This patch adds a check if the pending data is of address family AF_INET and directly calls udp_push_pending_frames from udp_v6_push_pending_frames if that is the case. This bug was found by Dave Jones with trinity. (Also move the initialization of fl6 below the AF_INET check, even if not strictly necessary.) Cc: Dave Jones Cc: YOSHIFUJI Hideaki Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/udp.h | 1 + net/ipv4/udp.c | 3 ++- net/ipv6/udp.c | 7 ++++++- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/udp.h b/include/net/udp.h index b30a71a51839..74c10ec5e74f 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -181,6 +181,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum, extern void udp_err(struct sk_buff *, u32); extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); +extern int udp_push_pending_frames(struct sock *sk); extern void udp_flush_pending_frames(struct sock *sk); extern int udp_rcv(struct sk_buff *skb); extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 959502afd8d9..6b270e53c207 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -800,7 +800,7 @@ send: /* * Push out all pending data as one UDP datagram. Socket is locked. */ -static int udp_push_pending_frames(struct sock *sk) +int udp_push_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); @@ -819,6 +819,7 @@ out: up->pending = 0; return err; } +EXPORT_SYMBOL(udp_push_pending_frames); int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f77e34c5a0e2..b6f31437a1f8 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -959,11 +959,16 @@ static int udp_v6_push_pending_frames(struct sock *sk) struct udphdr *uh; struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); - struct flowi6 *fl6 = &inet->cork.fl.u.ip6; + struct flowi6 *fl6; int err = 0; int is_udplite = IS_UDPLITE(sk); __wsum csum = 0; + if (up->pending == AF_INET) + return udp_push_pending_frames(sk); + + fl6 = &inet->cork.fl.u.ip6; + /* Grab the skbuff where UDP header space exists. */ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; -- cgit v1.2.3 From 1bc2774d866444c5b316fd1dc2b782f20c762cf4 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Tue, 2 Jul 2013 23:22:47 +0300 Subject: net: convert lls to use time_in_range() Time in range will fail safely if we move to a different cpu with an extremely large clock skew. Add time_in_range64() and convert lls to use it. changelog: v2 - fixed double call to sched_clock in can_poll_ll - fixed checkpatchisms Signed-off-by: Eliezer Tamir Signed-off-by: David S. 
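Why time_in_range64() rather than a plain "not yet past the deadline" test: if the task migrates to a CPU whose sched_clock() is far behind, "now" can land well before the recorded start, and a one-sided time_after64() check would keep the busy loop spinning; the two-sided range check simply fails and the loop falls back to normal sleeping. Below is a user-space sketch of the same signed-difference arithmetic; after_eq64() and in_range64() are stand-in helpers, not the kernel macros.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Same wrap-safe comparison trick as time_after_eq64()/time_before_eq64(). */
static bool after_eq64(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) >= 0;
}

static bool in_range64(uint64_t now, uint64_t start, uint64_t end)
{
	return after_eq64(now, start) && after_eq64(end, now);
}

int main(void)
{
	uint64_t start = 1000, budget = 500;

	/* still inside the busy-poll window: keep polling */
	printf("%d\n", in_range64(1200, start, start + budget));

	/* huge backwards clock skew after a CPU migration: the range test
	 * returns false and the loop stops, whereas a one-sided
	 * "now is not after end" test would happily keep spinning */
	printf("%d\n", in_range64(start - (1ULL << 40), start, start + budget));
	return 0;
}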
Miller --- fs/select.c | 10 ++++++---- include/linux/jiffies.h | 4 ++++ include/net/ll_poll.h | 26 +++++++++++++++++--------- 3 files changed, 27 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/fs/select.c b/fs/select.c index 36540754bad7..f28a58592725 100644 --- a/fs/select.c +++ b/fs/select.c @@ -403,7 +403,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) int retval, i, timed_out = 0; unsigned long slack = 0; unsigned int ll_flag = ll_get_flag(); - u64 ll_time = ll_end_time(); + u64 ll_start = ll_start_time(ll_flag); + u64 ll_time = ll_run_time(); rcu_read_lock(); retval = max_select_fd(n, fds); @@ -498,7 +499,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) } /* only if on, have sockets with POLL_LL and not out of time */ - if (ll_flag && can_ll && can_poll_ll(ll_time)) + if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time)) continue; /* @@ -770,7 +771,8 @@ static int do_poll(unsigned int nfds, struct poll_list *list, int timed_out = 0, count = 0; unsigned long slack = 0; unsigned int ll_flag = ll_get_flag(); - u64 ll_time = ll_end_time(); + u64 ll_start = ll_start_time(ll_flag); + u64 ll_time = ll_run_time(); /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { @@ -819,7 +821,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, break; /* only if on, have sockets with POLL_LL and not out of time */ - if (ll_flag && can_ll && can_poll_ll(ll_time)) + if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time)) continue; /* diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 8fb8edf12417..97ba4e78a37e 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -139,6 +139,10 @@ static inline u64 get_jiffies_64(void) ((__s64)(a) - (__s64)(b) >= 0)) #define time_before_eq64(a,b) time_after_eq64(b,a) +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) + /* * These four macros compare jiffies and 'a' for convenience. */ diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index 6c06f7c0a22d..b76f004f18a9 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -67,19 +67,23 @@ static inline u64 ll_sched_clock(void) /* we don't mind a ~2.5% imprecision so <<10 instead of *1000 * sk->sk_ll_usec is a u_int so this can't overflow */ -static inline u64 ll_sk_end_time(struct sock *sk) +static inline u64 ll_sk_run_time(struct sock *sk) { - return ((u64)ACCESS_ONCE(sk->sk_ll_usec) << 10) + ll_sched_clock(); + return (u64)ACCESS_ONCE(sk->sk_ll_usec) << 10; } /* in poll/select we use the global sysctl_net_ll_poll value * only call sched_clock() if enabled */ -static inline u64 ll_end_time(void) +static inline u64 ll_run_time(void) { - u64 end_time = ACCESS_ONCE(sysctl_net_ll_poll); + return (u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10; +} - return end_time ? (end_time << 10) + ll_sched_clock() : 0; +/* if flag is not set we don't need to know the time */ +static inline u64 ll_start_time(unsigned int flag) +{ + return flag ? ll_sched_clock() : 0; } static inline bool sk_valid_ll(struct sock *sk) @@ -88,9 +92,12 @@ static inline bool sk_valid_ll(struct sock *sk) !need_resched() && !signal_pending(current); } -static inline bool can_poll_ll(u64 end_time) +/* careful! 
time_in_range64 will evaluate now twice */ +static inline bool can_poll_ll(u64 start_time, u64 run_time) { - return !time_after64(ll_sched_clock(), end_time); + u64 now = ll_sched_clock(); + + return time_in_range64(now, start_time, start_time + run_time); } /* when used in sock_poll() nonblock is known at compile time to be true @@ -98,7 +105,8 @@ static inline bool can_poll_ll(u64 end_time) */ static inline bool sk_poll_ll(struct sock *sk, int nonblock) { - u64 end_time = nonblock ? 0 : ll_sk_end_time(sk); + u64 start_time = ll_start_time(!nonblock); + u64 run_time = ll_sk_run_time(sk); const struct net_device_ops *ops; struct napi_struct *napi; int rc = false; @@ -129,7 +137,7 @@ static inline bool sk_poll_ll(struct sock *sk, int nonblock) LINUX_MIB_LOWLATENCYRXPACKETS, rc); } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && - can_poll_ll(end_time)); + can_poll_ll(start_time, run_time)); rc = !skb_queue_empty(&sk->sk_receive_queue); out: -- cgit v1.2.3 From 419076f59fc5916bd200b45225c83437b37ab189 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Wed, 3 Jul 2013 06:41:24 +0300 Subject: net: lls fix build with allnoconfig correct placeholder declarations to prevent build breakage when !CONFIG_NET_LL_RX_POLL Signed-off-by: Eliezer Tamir Signed-off-by: David S. Miller --- include/net/ll_poll.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index b76f004f18a9..0d620ba19bc5 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -163,12 +163,12 @@ static inline unsigned long ll_get_flag(void) return 0; } -static inline u64 sk_ll_end_time(struct sock *sk) +static inline u64 ll_start_time(unsigned int flag) { return 0; } -static inline u64 ll_end_time(void) +static inline u64 ll_run_time(void) { return 0; } @@ -191,7 +191,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) { } -static inline bool can_poll_ll(u64 end_time) +static inline bool can_poll_ll(u64 start_time, u64 run_time) { return false; } -- cgit v1.2.3 From c50cd357887acf9fd7af3a5d492911bd825555a2 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 1 Jul 2013 19:24:00 +0200 Subject: net: gre: move GSO functions to gre_offload Similarly to TCP/UDP offloading, move all related GRE functions to gre_offload.c to make things more explicit and similar to the rest of the code. Suggested-by: Eric Dumazet Signed-off-by: Daniel Borkmann Signed-off-by: David S. 
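gre_gso_segment(), one of the callbacks this patch relocates into gre_offload.c, has to know how long the outer GRE header is before it can pull it off and segment the inner packet: four bytes of base header plus one extra four-byte word for each of the checksum, key and sequence flags. The sketch below mirrors that ghl accumulation; the FLAG_* constants and gre_header_len() name are stand-ins for the kernel's __be16 GRE_CSUM/GRE_KEY/GRE_SEQ and GRE_HEADER_SECTION.

#include <stdint.h>
#include <stdio.h>

#define HDR_SECTION	4	/* stand-in for GRE_HEADER_SECTION */
#define FLAG_CSUM	0x8000
#define FLAG_KEY	0x2000
#define FLAG_SEQ	0x1000

/* Mirrors the header-length accounting in gre_gso_segment(): 4 bytes of
 * base header plus one 4-byte word per optional field present. */
static int gre_header_len(uint16_t flags)
{
	int len = HDR_SECTION;

	if (flags & FLAG_CSUM)
		len += HDR_SECTION;
	if (flags & FLAG_KEY)
		len += HDR_SECTION;
	if (flags & FLAG_SEQ)
		len += HDR_SECTION;
	return len;
}

int main(void)
{
	printf("%d\n", gre_header_len(0));				/* 4 */
	printf("%d\n", gre_header_len(FLAG_KEY));			/* 8 */
	printf("%d\n", gre_header_len(FLAG_CSUM | FLAG_KEY | FLAG_SEQ));/* 16 */
	return 0;
}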
Miller --- include/net/gre.h | 4 + net/ipv4/Makefile | 1 + net/ipv4/gre.c | 514 ------------------------------------------------- net/ipv4/gre_demux.c | 414 +++++++++++++++++++++++++++++++++++++++ net/ipv4/gre_offload.c | 127 ++++++++++++ 5 files changed, 546 insertions(+), 514 deletions(-) delete mode 100644 net/ipv4/gre.c create mode 100644 net/ipv4/gre_demux.c create mode 100644 net/ipv4/gre_offload.c (limited to 'include') diff --git a/include/net/gre.h b/include/net/gre.h index a5a4ddf05300..57e4afdf7879 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -32,6 +32,10 @@ struct gre_cisco_protocol { int gre_cisco_register(struct gre_cisco_protocol *proto); int gre_cisco_unregister(struct gre_cisco_protocol *proto); + +int gre_offload_init(void); +void gre_offload_exit(void); + void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, int hdr_len); struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum); diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 86ded0bac9c7..4b81e91c80fe 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o obj-$(CONFIG_IP_MROUTE) += ipmr.o obj-$(CONFIG_NET_IPIP) += ipip.o +gre-y := gre_demux.o gre_offload.o obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o obj-$(CONFIG_NET_IPGRE) += ip_gre.o obj-$(CONFIG_NET_IPVTI) += ip_vti.o diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c deleted file mode 100644 index ba4803e609b5..000000000000 --- a/net/ipv4/gre.c +++ /dev/null @@ -1,514 +0,0 @@ -/* - * GRE over IPv4 demultiplexer driver - * - * Authors: Dmitry Kozlov (xeb@mail.ru) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly; -static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX]; - -int gre_add_protocol(const struct gre_protocol *proto, u8 version) -{ - if (version >= GREPROTO_MAX) - return -EINVAL; - - return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ? - 0 : -EBUSY; -} -EXPORT_SYMBOL_GPL(gre_add_protocol); - -int gre_del_protocol(const struct gre_protocol *proto, u8 version) -{ - int ret; - - if (version >= GREPROTO_MAX) - return -EINVAL; - - ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ? 
- 0 : -EBUSY; - - if (ret) - return ret; - - synchronize_rcu(); - return 0; -} -EXPORT_SYMBOL_GPL(gre_del_protocol); - -void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, - int hdr_len) -{ - struct gre_base_hdr *greh; - - skb_push(skb, hdr_len); - - greh = (struct gre_base_hdr *)skb->data; - greh->flags = tnl_flags_to_gre_flags(tpi->flags); - greh->protocol = tpi->proto; - - if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) { - __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); - - if (tpi->flags&TUNNEL_SEQ) { - *ptr = tpi->seq; - ptr--; - } - if (tpi->flags&TUNNEL_KEY) { - *ptr = tpi->key; - ptr--; - } - if (tpi->flags&TUNNEL_CSUM && - !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) { - *ptr = 0; - *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, - skb->len, 0)); - } - } -} -EXPORT_SYMBOL_GPL(gre_build_header); - -struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum) -{ - int err; - - if (likely(!skb->encapsulation)) { - skb_reset_inner_headers(skb); - skb->encapsulation = 1; - } - - if (skb_is_gso(skb)) { - err = skb_unclone(skb, GFP_ATOMIC); - if (unlikely(err)) - goto error; - skb_shinfo(skb)->gso_type |= SKB_GSO_GRE; - return skb; - } else if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) { - err = skb_checksum_help(skb); - if (unlikely(err)) - goto error; - } else if (skb->ip_summed != CHECKSUM_PARTIAL) - skb->ip_summed = CHECKSUM_NONE; - - return skb; -error: - kfree_skb(skb); - return ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(gre_handle_offloads); - -static __sum16 check_checksum(struct sk_buff *skb) -{ - __sum16 csum = 0; - - switch (skb->ip_summed) { - case CHECKSUM_COMPLETE: - csum = csum_fold(skb->csum); - - if (!csum) - break; - /* Fall through. */ - - case CHECKSUM_NONE: - skb->csum = 0; - csum = __skb_checksum_complete(skb); - skb->ip_summed = CHECKSUM_COMPLETE; - break; - } - - return csum; -} - -static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err) -{ - unsigned int ip_hlen = ip_hdrlen(skb); - const struct gre_base_hdr *greh; - __be32 *options; - int hdr_len; - - if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) - return -EINVAL; - - greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); - if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) - return -EINVAL; - - tpi->flags = gre_flags_to_tnl_flags(greh->flags); - hdr_len = ip_gre_calc_hlen(tpi->flags); - - if (!pskb_may_pull(skb, hdr_len)) - return -EINVAL; - - greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); - tpi->proto = greh->protocol; - - options = (__be32 *)(greh + 1); - if (greh->flags & GRE_CSUM) { - if (check_checksum(skb)) { - *csum_err = true; - return -EINVAL; - } - options++; - } - - if (greh->flags & GRE_KEY) { - tpi->key = *options; - options++; - } else - tpi->key = 0; - - if (unlikely(greh->flags & GRE_SEQ)) { - tpi->seq = *options; - options++; - } else - tpi->seq = 0; - - /* WCCP version 1 and 2 protocol decoding. 
- * - Change protocol to IP - * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header - */ - if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { - tpi->proto = htons(ETH_P_IP); - if ((*(u8 *)options & 0xF0) != 0x40) { - hdr_len += 4; - if (!pskb_may_pull(skb, hdr_len)) - return -EINVAL; - } - } - - return iptunnel_pull_header(skb, hdr_len, tpi->proto); -} - -static int gre_cisco_rcv(struct sk_buff *skb) -{ - struct tnl_ptk_info tpi; - int i; - bool csum_err = false; - - if (parse_gre_header(skb, &tpi, &csum_err) < 0) - goto drop; - - rcu_read_lock(); - for (i = 0; i < GRE_IP_PROTO_MAX; i++) { - struct gre_cisco_protocol *proto; - int ret; - - proto = rcu_dereference(gre_cisco_proto_list[i]); - if (!proto) - continue; - ret = proto->handler(skb, &tpi); - if (ret == PACKET_RCVD) { - rcu_read_unlock(); - return 0; - } - } - rcu_read_unlock(); - - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); -drop: - kfree_skb(skb); - return 0; -} - -static void gre_cisco_err(struct sk_buff *skb, u32 info) -{ - /* All the routers (except for Linux) return only - * 8 bytes of packet payload. It means, that precise relaying of - * ICMP in the real Internet is absolutely infeasible. - * - * Moreover, Cisco "wise men" put GRE key to the third word - * in GRE header. It makes impossible maintaining even soft - * state for keyed - * GRE tunnels with enabled checksum. Tell them "thank you". - * - * Well, I wonder, rfc1812 was written by Cisco employee, - * what the hell these idiots break standards established - * by themselves??? - */ - - const int type = icmp_hdr(skb)->type; - const int code = icmp_hdr(skb)->code; - struct tnl_ptk_info tpi; - bool csum_err = false; - int i; - - if (parse_gre_header(skb, &tpi, &csum_err)) { - if (!csum_err) /* ignore csum errors. 
*/ - return; - } - - if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { - ipv4_update_pmtu(skb, dev_net(skb->dev), info, - skb->dev->ifindex, 0, IPPROTO_GRE, 0); - return; - } - if (type == ICMP_REDIRECT) { - ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0, - IPPROTO_GRE, 0); - return; - } - - rcu_read_lock(); - for (i = 0; i < GRE_IP_PROTO_MAX; i++) { - struct gre_cisco_protocol *proto; - - proto = rcu_dereference(gre_cisco_proto_list[i]); - if (!proto) - continue; - - if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD) - goto out; - - } -out: - rcu_read_unlock(); -} - -static int gre_rcv(struct sk_buff *skb) -{ - const struct gre_protocol *proto; - u8 ver; - int ret; - - if (!pskb_may_pull(skb, 12)) - goto drop; - - ver = skb->data[1]&0x7f; - if (ver >= GREPROTO_MAX) - goto drop; - - rcu_read_lock(); - proto = rcu_dereference(gre_proto[ver]); - if (!proto || !proto->handler) - goto drop_unlock; - ret = proto->handler(skb); - rcu_read_unlock(); - return ret; - -drop_unlock: - rcu_read_unlock(); -drop: - kfree_skb(skb); - return NET_RX_DROP; -} - -static void gre_err(struct sk_buff *skb, u32 info) -{ - const struct gre_protocol *proto; - const struct iphdr *iph = (const struct iphdr *)skb->data; - u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f; - - if (ver >= GREPROTO_MAX) - return; - - rcu_read_lock(); - proto = rcu_dereference(gre_proto[ver]); - if (proto && proto->err_handler) - proto->err_handler(skb, info); - rcu_read_unlock(); -} - -static struct sk_buff *gre_gso_segment(struct sk_buff *skb, - netdev_features_t features) -{ - struct sk_buff *segs = ERR_PTR(-EINVAL); - netdev_features_t enc_features; - int ghl = GRE_HEADER_SECTION; - struct gre_base_hdr *greh; - int mac_len = skb->mac_len; - __be16 protocol = skb->protocol; - int tnl_hlen; - bool csum; - - if (unlikely(skb_shinfo(skb)->gso_type & - ~(SKB_GSO_TCPV4 | - SKB_GSO_TCPV6 | - SKB_GSO_UDP | - SKB_GSO_DODGY | - SKB_GSO_TCP_ECN | - SKB_GSO_GRE))) - goto out; - - if (unlikely(!pskb_may_pull(skb, sizeof(*greh)))) - goto out; - - greh = (struct gre_base_hdr *)skb_transport_header(skb); - - if (greh->flags & GRE_KEY) - ghl += GRE_HEADER_SECTION; - if (greh->flags & GRE_SEQ) - ghl += GRE_HEADER_SECTION; - if (greh->flags & GRE_CSUM) { - ghl += GRE_HEADER_SECTION; - csum = true; - } else - csum = false; - - /* setup inner skb. */ - skb->protocol = greh->protocol; - skb->encapsulation = 0; - - if (unlikely(!pskb_may_pull(skb, ghl))) - goto out; - __skb_pull(skb, ghl); - skb_reset_mac_header(skb); - skb_set_network_header(skb, skb_inner_network_offset(skb)); - skb->mac_len = skb_inner_network_offset(skb); - - /* segment inner packet. 
*/ - enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); - segs = skb_mac_gso_segment(skb, enc_features); - if (!segs || IS_ERR(segs)) - goto out; - - skb = segs; - tnl_hlen = skb_tnl_header_len(skb); - do { - __skb_push(skb, ghl); - if (csum) { - __be32 *pcsum; - - if (skb_has_shared_frag(skb)) { - int err; - - err = __skb_linearize(skb); - if (err) { - kfree_skb(segs); - segs = ERR_PTR(err); - goto out; - } - } - - greh = (struct gre_base_hdr *)(skb->data); - pcsum = (__be32 *)(greh + 1); - *pcsum = 0; - *(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0)); - } - __skb_push(skb, tnl_hlen - ghl); - - skb_reset_mac_header(skb); - skb_set_network_header(skb, mac_len); - skb->mac_len = mac_len; - skb->protocol = protocol; - } while ((skb = skb->next)); -out: - return segs; -} - -static int gre_gso_send_check(struct sk_buff *skb) -{ - if (!skb->encapsulation) - return -EINVAL; - return 0; -} - -static const struct net_protocol net_gre_protocol = { - .handler = gre_rcv, - .err_handler = gre_err, - .netns_ok = 1, -}; - -static const struct net_offload gre_offload = { - .callbacks = { - .gso_send_check = gre_gso_send_check, - .gso_segment = gre_gso_segment, - }, -}; - -static const struct gre_protocol ipgre_protocol = { - .handler = gre_cisco_rcv, - .err_handler = gre_cisco_err, -}; - -int gre_cisco_register(struct gre_cisco_protocol *newp) -{ - struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **) - &gre_cisco_proto_list[newp->priority]; - - return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY; -} -EXPORT_SYMBOL_GPL(gre_cisco_register); - -int gre_cisco_unregister(struct gre_cisco_protocol *del_proto) -{ - struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **) - &gre_cisco_proto_list[del_proto->priority]; - int ret; - - ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL; - - if (ret) - return ret; - - synchronize_net(); - return 0; -} -EXPORT_SYMBOL_GPL(gre_cisco_unregister); - -static int __init gre_init(void) -{ - pr_info("GRE over IPv4 demultiplexor driver\n"); - - if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) { - pr_err("can't add protocol\n"); - goto err; - } - - if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) { - pr_info("%s: can't add ipgre handler\n", __func__); - goto err_gre; - } - - if (inet_add_offload(&gre_offload, IPPROTO_GRE)) { - pr_err("can't add protocol offload\n"); - goto err_gso; - } - - return 0; -err_gso: - gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); -err_gre: - inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); -err: - return -EAGAIN; -} - -static void __exit gre_exit(void) -{ - inet_del_offload(&gre_offload, IPPROTO_GRE); - gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); - inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); -} - -module_init(gre_init); -module_exit(gre_exit); - -MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver"); -MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)"); -MODULE_LICENSE("GPL"); diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c new file mode 100644 index 000000000000..736c9fc3ef93 --- /dev/null +++ b/net/ipv4/gre_demux.c @@ -0,0 +1,414 @@ +/* + * GRE over IPv4 demultiplexer driver + * + * Authors: Dmitry Kozlov (xeb@mail.ru) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly; +static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX]; + +int gre_add_protocol(const struct gre_protocol *proto, u8 version) +{ + if (version >= GREPROTO_MAX) + return -EINVAL; + + return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ? + 0 : -EBUSY; +} +EXPORT_SYMBOL_GPL(gre_add_protocol); + +int gre_del_protocol(const struct gre_protocol *proto, u8 version) +{ + int ret; + + if (version >= GREPROTO_MAX) + return -EINVAL; + + ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ? + 0 : -EBUSY; + + if (ret) + return ret; + + synchronize_rcu(); + return 0; +} +EXPORT_SYMBOL_GPL(gre_del_protocol); + +void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, + int hdr_len) +{ + struct gre_base_hdr *greh; + + skb_push(skb, hdr_len); + + greh = (struct gre_base_hdr *)skb->data; + greh->flags = tnl_flags_to_gre_flags(tpi->flags); + greh->protocol = tpi->proto; + + if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) { + __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); + + if (tpi->flags&TUNNEL_SEQ) { + *ptr = tpi->seq; + ptr--; + } + if (tpi->flags&TUNNEL_KEY) { + *ptr = tpi->key; + ptr--; + } + if (tpi->flags&TUNNEL_CSUM && + !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) { + *ptr = 0; + *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, + skb->len, 0)); + } + } +} +EXPORT_SYMBOL_GPL(gre_build_header); + +struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum) +{ + int err; + + if (likely(!skb->encapsulation)) { + skb_reset_inner_headers(skb); + skb->encapsulation = 1; + } + + if (skb_is_gso(skb)) { + err = skb_unclone(skb, GFP_ATOMIC); + if (unlikely(err)) + goto error; + skb_shinfo(skb)->gso_type |= SKB_GSO_GRE; + return skb; + } else if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) { + err = skb_checksum_help(skb); + if (unlikely(err)) + goto error; + } else if (skb->ip_summed != CHECKSUM_PARTIAL) + skb->ip_summed = CHECKSUM_NONE; + + return skb; +error: + kfree_skb(skb); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(gre_handle_offloads); + +static __sum16 check_checksum(struct sk_buff *skb) +{ + __sum16 csum = 0; + + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + csum = csum_fold(skb->csum); + + if (!csum) + break; + /* Fall through. 
*/ + + case CHECKSUM_NONE: + skb->csum = 0; + csum = __skb_checksum_complete(skb); + skb->ip_summed = CHECKSUM_COMPLETE; + break; + } + + return csum; +} + +static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, + bool *csum_err) +{ + unsigned int ip_hlen = ip_hdrlen(skb); + const struct gre_base_hdr *greh; + __be32 *options; + int hdr_len; + + if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) + return -EINVAL; + + greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); + if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) + return -EINVAL; + + tpi->flags = gre_flags_to_tnl_flags(greh->flags); + hdr_len = ip_gre_calc_hlen(tpi->flags); + + if (!pskb_may_pull(skb, hdr_len)) + return -EINVAL; + + greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); + tpi->proto = greh->protocol; + + options = (__be32 *)(greh + 1); + if (greh->flags & GRE_CSUM) { + if (check_checksum(skb)) { + *csum_err = true; + return -EINVAL; + } + options++; + } + + if (greh->flags & GRE_KEY) { + tpi->key = *options; + options++; + } else + tpi->key = 0; + + if (unlikely(greh->flags & GRE_SEQ)) { + tpi->seq = *options; + options++; + } else + tpi->seq = 0; + + /* WCCP version 1 and 2 protocol decoding. + * - Change protocol to IP + * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header + */ + if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { + tpi->proto = htons(ETH_P_IP); + if ((*(u8 *)options & 0xF0) != 0x40) { + hdr_len += 4; + if (!pskb_may_pull(skb, hdr_len)) + return -EINVAL; + } + } + + return iptunnel_pull_header(skb, hdr_len, tpi->proto); +} + +static int gre_cisco_rcv(struct sk_buff *skb) +{ + struct tnl_ptk_info tpi; + int i; + bool csum_err = false; + + if (parse_gre_header(skb, &tpi, &csum_err) < 0) + goto drop; + + rcu_read_lock(); + for (i = 0; i < GRE_IP_PROTO_MAX; i++) { + struct gre_cisco_protocol *proto; + int ret; + + proto = rcu_dereference(gre_cisco_proto_list[i]); + if (!proto) + continue; + ret = proto->handler(skb, &tpi); + if (ret == PACKET_RCVD) { + rcu_read_unlock(); + return 0; + } + } + rcu_read_unlock(); + + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); +drop: + kfree_skb(skb); + return 0; +} + +static void gre_cisco_err(struct sk_buff *skb, u32 info) +{ + /* All the routers (except for Linux) return only + * 8 bytes of packet payload. It means, that precise relaying of + * ICMP in the real Internet is absolutely infeasible. + * + * Moreover, Cisco "wise men" put GRE key to the third word + * in GRE header. It makes impossible maintaining even soft + * state for keyed + * GRE tunnels with enabled checksum. Tell them "thank you". + * + * Well, I wonder, rfc1812 was written by Cisco employee, + * what the hell these idiots break standards established + * by themselves??? + */ + + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; + struct tnl_ptk_info tpi; + bool csum_err = false; + int i; + + if (parse_gre_header(skb, &tpi, &csum_err)) { + if (!csum_err) /* ignore csum errors. 
*/ + return; + } + + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { + ipv4_update_pmtu(skb, dev_net(skb->dev), info, + skb->dev->ifindex, 0, IPPROTO_GRE, 0); + return; + } + if (type == ICMP_REDIRECT) { + ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0, + IPPROTO_GRE, 0); + return; + } + + rcu_read_lock(); + for (i = 0; i < GRE_IP_PROTO_MAX; i++) { + struct gre_cisco_protocol *proto; + + proto = rcu_dereference(gre_cisco_proto_list[i]); + if (!proto) + continue; + + if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD) + goto out; + + } +out: + rcu_read_unlock(); +} + +static int gre_rcv(struct sk_buff *skb) +{ + const struct gre_protocol *proto; + u8 ver; + int ret; + + if (!pskb_may_pull(skb, 12)) + goto drop; + + ver = skb->data[1]&0x7f; + if (ver >= GREPROTO_MAX) + goto drop; + + rcu_read_lock(); + proto = rcu_dereference(gre_proto[ver]); + if (!proto || !proto->handler) + goto drop_unlock; + ret = proto->handler(skb); + rcu_read_unlock(); + return ret; + +drop_unlock: + rcu_read_unlock(); +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static void gre_err(struct sk_buff *skb, u32 info) +{ + const struct gre_protocol *proto; + const struct iphdr *iph = (const struct iphdr *)skb->data; + u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f; + + if (ver >= GREPROTO_MAX) + return; + + rcu_read_lock(); + proto = rcu_dereference(gre_proto[ver]); + if (proto && proto->err_handler) + proto->err_handler(skb, info); + rcu_read_unlock(); +} + +static const struct net_protocol net_gre_protocol = { + .handler = gre_rcv, + .err_handler = gre_err, + .netns_ok = 1, +}; + +static const struct gre_protocol ipgre_protocol = { + .handler = gre_cisco_rcv, + .err_handler = gre_cisco_err, +}; + +int gre_cisco_register(struct gre_cisco_protocol *newp) +{ + struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **) + &gre_cisco_proto_list[newp->priority]; + + return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY; +} +EXPORT_SYMBOL_GPL(gre_cisco_register); + +int gre_cisco_unregister(struct gre_cisco_protocol *del_proto) +{ + struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **) + &gre_cisco_proto_list[del_proto->priority]; + int ret; + + ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL; + + if (ret) + return ret; + + synchronize_net(); + return 0; +} +EXPORT_SYMBOL_GPL(gre_cisco_unregister); + +static int __init gre_init(void) +{ + pr_info("GRE over IPv4 demultiplexor driver\n"); + + if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) { + pr_err("can't add protocol\n"); + goto err; + } + + if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) { + pr_info("%s: can't add ipgre handler\n", __func__); + goto err_gre; + } + + if (gre_offload_init()) { + pr_err("can't add protocol offload\n"); + goto err_gso; + } + + return 0; +err_gso: + gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); +err_gre: + inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); +err: + return -EAGAIN; +} + +static void __exit gre_exit(void) +{ + gre_offload_exit(); + + gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); + inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); +} + +module_init(gre_init); +module_exit(gre_exit); + +MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver"); +MODULE_AUTHOR("D. 
Kozlov (xeb@mail.ru)"); +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c new file mode 100644 index 000000000000..a9d8cd2bff4b --- /dev/null +++ b/net/ipv4/gre_offload.c @@ -0,0 +1,127 @@ +/* + * IPV4 GSO/GRO offload support + * Linux INET implementation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * GRE GSO support + */ + +#include +#include +#include + +static int gre_gso_send_check(struct sk_buff *skb) +{ + if (!skb->encapsulation) + return -EINVAL; + return 0; +} + +static struct sk_buff *gre_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + netdev_features_t enc_features; + int ghl = GRE_HEADER_SECTION; + struct gre_base_hdr *greh; + int mac_len = skb->mac_len; + __be16 protocol = skb->protocol; + int tnl_hlen; + bool csum; + + if (unlikely(skb_shinfo(skb)->gso_type & + ~(SKB_GSO_TCPV4 | + SKB_GSO_TCPV6 | + SKB_GSO_UDP | + SKB_GSO_DODGY | + SKB_GSO_TCP_ECN | + SKB_GSO_GRE))) + goto out; + + if (unlikely(!pskb_may_pull(skb, sizeof(*greh)))) + goto out; + + greh = (struct gre_base_hdr *)skb_transport_header(skb); + + if (greh->flags & GRE_KEY) + ghl += GRE_HEADER_SECTION; + if (greh->flags & GRE_SEQ) + ghl += GRE_HEADER_SECTION; + if (greh->flags & GRE_CSUM) { + ghl += GRE_HEADER_SECTION; + csum = true; + } else + csum = false; + + /* setup inner skb. */ + skb->protocol = greh->protocol; + skb->encapsulation = 0; + + if (unlikely(!pskb_may_pull(skb, ghl))) + goto out; + + __skb_pull(skb, ghl); + skb_reset_mac_header(skb); + skb_set_network_header(skb, skb_inner_network_offset(skb)); + skb->mac_len = skb_inner_network_offset(skb); + + /* segment inner packet. 
*/ + enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); + segs = skb_mac_gso_segment(skb, enc_features); + if (!segs || IS_ERR(segs)) + goto out; + + skb = segs; + tnl_hlen = skb_tnl_header_len(skb); + do { + __skb_push(skb, ghl); + if (csum) { + __be32 *pcsum; + + if (skb_has_shared_frag(skb)) { + int err; + + err = __skb_linearize(skb); + if (err) { + kfree_skb(segs); + segs = ERR_PTR(err); + goto out; + } + } + + greh = (struct gre_base_hdr *)(skb->data); + pcsum = (__be32 *)(greh + 1); + *pcsum = 0; + *(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0)); + } + __skb_push(skb, tnl_hlen - ghl); + + skb_reset_mac_header(skb); + skb_set_network_header(skb, mac_len); + skb->mac_len = mac_len; + skb->protocol = protocol; + } while ((skb = skb->next)); +out: + return segs; +} + +static const struct net_offload gre_offload = { + .callbacks = { + .gso_send_check = gre_gso_send_check, + .gso_segment = gre_gso_segment, + }, +}; + +int __init gre_offload_init(void) +{ + return inet_add_offload(&gre_offload, IPPROTO_GRE); +} + +void __exit gre_offload_exit(void) +{ + inet_del_offload(&gre_offload, IPPROTO_GRE); +} -- cgit v1.2.3 From 9eb5bf838d06aa6ddebe4aca6b5cedcf2eb53b86 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 3 Jul 2013 05:02:22 -0700 Subject: net: sock: fix TCP_SKB_MIN_TRUESIZE commit eea86af6b1e18d ("net: sock: adapt SOCK_MIN_RCVBUF and SOCK_MIN_SNDBUF") forgot the sk_buff alignment taken into account in __alloc_skb() : skb->truesize = SKB_TRUESIZE(size); While above commit fixed the sender issue, the receiver is still dropping the second packet (on loopback device), because the receiver socket can not really hold two skbs : First packet truesize already is above sk_rcvbuf, so even TCP coalescing cannot help. On a typical 64bit build, each tcp skb truesize is 2304, instead of 2272 Signed-off-by: Eric Dumazet Cc: Daniel Borkmann Cc: Neal Cardwell Acked-by: Neal Cardwell Tested-by: Neal Cardwell Signed-off-by: David S. Miller --- include/net/sock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index ea6206ccc896..95a5a2c6925a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2052,7 +2052,7 @@ static inline void sk_wake_async(struct sock *sk, int how, int band) * Note: for send buffers, TCP works better if we can build two skbs at * minimum. */ -#define TCP_SKB_MIN_TRUESIZE (2048 + sizeof(struct sk_buff)) +#define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff))) #define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2) #define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE -- cgit v1.2.3 From 0e0764715d8116484d808f5b3985ca043080788e Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Thu, 4 Jul 2013 10:35:48 +0100 Subject: dt:net:stmmac: Add dt specific phy reset callback support. This patch adds phy reset callback support for stmmac driver via device trees. It adds three new properties to gmac device tree bindings to define the reset signal via gpio. With this patch users can conveniently pass reset gpio number with pre, pulse and post delay in micro secs via DTs. active low: _________ ____________ | | | | |_______________| active high: ________________ | | | | ________| |___________ Signed-off-by: Srinivas Kandagatla Signed-off-by: David S. 
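The three cells of "snps,reset-delays-us" drive the reset pulse shape described by the waveforms in the commit message: wait the pre-delay with the line deasserted, assert reset for the pulse width, then release it and wait the post-delay before touching the MDIO bus; "snps,reset-active-low" only flips which level counts as asserted. A user-space sketch of that sequence follows; set_reset_gpio() and phy_reset_pulse() are made-up stand-ins for the gpio_set_value()/udelay() calls in the stmmac_mdio_reset() hunk below.

#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>

/* Placeholder for gpio_set_value(); here it only logs the level. */
static void set_reset_gpio(int level)
{
	printf("reset line -> %d\n", level);
}

/* Drive the PHY reset pulse from the "snps,reset-delays-us" triplet:
 * idle for delays[0], assert reset for delays[1], then deassert and
 * wait delays[2]. With active_low the asserted level is 0. */
static void phy_reset_pulse(bool active_low, const unsigned int delays_us[3])
{
	int asserted = active_low ? 0 : 1;

	set_reset_gpio(!asserted);	/* start deasserted */
	usleep(delays_us[0]);		/* pre-delay */
	set_reset_gpio(asserted);	/* assert reset */
	usleep(delays_us[1]);		/* pulse width */
	set_reset_gpio(!asserted);	/* release reset */
	usleep(delays_us[2]);		/* post-delay */
}

int main(void)
{
	unsigned int delays_us[3] = { 1000, 10000, 1000 };	/* example values */

	phy_reset_pulse(true, delays_us);
	return 0;
}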
Miller --- Documentation/devicetree/bindings/net/stmmac.txt | 6 +++ drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 48 ++++++++++++++++++++++- include/linux/stmmac.h | 4 ++ 3 files changed, 56 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt index e1ddfcc46a4e..261c563b5f06 100644 --- a/Documentation/devicetree/bindings/net/stmmac.txt +++ b/Documentation/devicetree/bindings/net/stmmac.txt @@ -13,6 +13,12 @@ Required properties: - phy-mode: String, operation mode of the PHY interface. Supported values are: "mii", "rmii", "gmii", "rgmii". - snps,phy-addr phy address to connect to. +- snps,reset-gpio gpio number for phy reset. +- snps,reset-active-low boolean flag to indicate if phy reset is active low. +- snps,reset-delays-us is triplet of delays + The 1st cell is reset pre-delay in micro seconds. + The 2nd cell is reset pulse in micro seconds. + The 3rd cell is reset post-delay in micro seconds. - snps,pbl Programmable Burst Length - snps,fixed-burst Program the DMA to use the fixed burst mode - snps,mixed-burst Program the DMA to use the mixed burst mode diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index cc15039eaa47..fe7bc9903867 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -27,6 +27,9 @@ #include #include #include +#include +#include + #include #include "stmmac.h" @@ -131,10 +134,46 @@ static int stmmac_mdio_reset(struct mii_bus *bus) struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; + struct stmmac_mdio_bus_data *data = priv->plat->mdio_bus_data; + +#ifdef CONFIG_OF + if (priv->device->of_node) { + int reset_gpio, active_low; + + if (data->reset_gpio < 0) { + struct device_node *np = priv->device->of_node; + if (!np) + return 0; + + data->reset_gpio = of_get_named_gpio(np, + "snps,reset-gpio", 0); + if (data->reset_gpio < 0) + return 0; + + data->active_low = of_property_read_bool(np, + "snps,reset-active-low"); + of_property_read_u32_array(np, + "snps,reset-delays-us", data->delays, 3); + } + + reset_gpio = data->reset_gpio; + active_low = data->active_low; + + if (!gpio_request(reset_gpio, "mdio-reset")) { + gpio_direction_output(reset_gpio, active_low ? 1 : 0); + udelay(data->delays[0]); + gpio_set_value(reset_gpio, active_low ? 0 : 1); + udelay(data->delays[1]); + gpio_set_value(reset_gpio, active_low ? 1 : 0); + udelay(data->delays[2]); + gpio_free(reset_gpio); + } + } +#endif - if (priv->plat->mdio_bus_data->phy_reset) { + if (data->phy_reset) { pr_debug("stmmac_mdio_reset: calling phy_reset\n"); - priv->plat->mdio_bus_data->phy_reset(priv->plat->bsp_priv); + data->phy_reset(priv->plat->bsp_priv); } /* This is a workaround for problems with the STE101P PHY. 
@@ -172,6 +211,11 @@ int stmmac_mdio_register(struct net_device *ndev) else irqlist = priv->mii_irq; +#ifdef CONFIG_OF + if (priv->device->of_node) + mdio_bus_data->reset_gpio = -1; +#endif + new_bus->name = "stmmac"; new_bus->read = &stmmac_mdio_read; new_bus->write = &stmmac_mdio_write; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index c1b3ed3fb787..9e495d31516e 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -80,6 +80,10 @@ struct stmmac_mdio_bus_data { unsigned int phy_mask; int *irqs; int probed_phy_irq; +#ifdef CONFIG_OF + int reset_gpio, active_low; + u32 delays[3]; +#endif }; struct stmmac_dma_cfg { -- cgit v1.2.3 From cbf55001b2ddb814329735641be5d29b08c82b08 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Mon, 8 Jul 2013 16:20:34 +0300 Subject: net: rename low latency sockets functions to busy poll Rename functions in include/net/ll_poll.h to busy wait. Clarify documentation about expected power use increase. Rename POLL_LL to POLL_BUSY_LOOP. Add need_resched() testing to poll/select busy loops. Note, that in select and poll can_busy_poll is dynamic and is updated continuously to reflect the existence of supported sockets with valid queue information. Signed-off-by: Eliezer Tamir Signed-off-by: David S. Miller --- Documentation/sysctl/net.txt | 12 +++++---- fs/select.c | 60 +++++++++++++++++++++++++---------------- include/net/ll_poll.h | 46 ++++++++++++++++--------------- include/uapi/asm-generic/poll.h | 2 +- net/core/datagram.c | 3 ++- net/ipv4/tcp.c | 6 ++--- net/socket.c | 12 ++++----- 7 files changed, 80 insertions(+), 61 deletions(-) (limited to 'include') diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index e658bbfb641f..7323b88e26be 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -53,22 +53,24 @@ Default: 64 low_latency_read ---------------- Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL) -Approximate time in us to spin waiting for packets on the device queue. +Approximate time in us to busy loop waiting for packets on the device queue. This sets the default value of the SO_LL socket option. -Can be set or overridden per socket by setting socket option SO_LL. -Recommended value is 50. May increase power usage. +Can be set or overridden per socket by setting socket option SO_LL, which is +the preferred method of enabling. +If you need to enable the feature globally via sysctl, a value of 50 is recommended. +Will increase power usage. Default: 0 (off) low_latency_poll ---------------- Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL) -Approximate time in us to spin waiting for packets on the device queue. +Approximate time in us to busy loop waiting for events. Recommended value depends on the number of sockets you poll on. For several sockets 50, for several hundreds 100. For more than that you probably want to use epoll. Note that only sockets with SO_LL set will be busy polled, so you want to either selectively set SO_LL on those sockets or set sysctl.net.low_latency_read globally. -May increase power usage. +Will increase power usage. 
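The shape of the busy-poll loops being reworked in fs/select.c below is simple: repeatedly try a non-blocking check for new data and give up once a microsecond budget has elapsed, at which point the caller falls back to an ordinary blocking wait. A minimal user-space sketch of that pattern, with data_ready() as a stand-in for the kernel's "did the NAPI poll put anything on the receive queue" check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Stand-in for "did a packet show up on the socket queue?" */
static bool data_ready(void)
{
	return false;
}

/* Spin for at most budget_us microseconds, then give up so the caller
 * can fall back to an ordinary blocking wait. */
static bool busy_poll(unsigned int budget_us)
{
	uint64_t end = now_us() + budget_us;

	do {
		if (data_ready())
			return true;
	} while (now_us() < end);
	return false;
}

int main(void)
{
	printf("got data: %d\n", busy_poll(50));	/* ~50 us of spinning */
	return 0;
}

The budget is the whole point: the CPU burns cycles only for a bounded, tunable window, which is why the documentation text above warns that enabling it will increase power usage.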
Default: 0 (off) rmem_default diff --git a/fs/select.c b/fs/select.c index f28a58592725..25cac5faf6d6 100644 --- a/fs/select.c +++ b/fs/select.c @@ -402,9 +402,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) poll_table *wait; int retval, i, timed_out = 0; unsigned long slack = 0; - unsigned int ll_flag = ll_get_flag(); - u64 ll_start = ll_start_time(ll_flag); - u64 ll_time = ll_run_time(); + unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; + u64 busy_start = busy_loop_start_time(busy_flag); + u64 busy_end = busy_loop_end_time(); rcu_read_lock(); retval = max_select_fd(n, fds); @@ -427,7 +427,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) retval = 0; for (;;) { unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp; - bool can_ll = false; + bool can_busy_loop = false; inp = fds->in; outp = fds->out; exp = fds->ex; rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex; @@ -456,7 +456,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) mask = DEFAULT_POLLMASK; if (f_op && f_op->poll) { wait_key_set(wait, in, out, - bit, ll_flag); + bit, busy_flag); mask = (*f_op->poll)(f.file, wait); } fdput(f); @@ -475,11 +475,18 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) retval++; wait->_qproc = NULL; } - if (mask & POLL_LL) - can_ll = true; /* got something, stop busy polling */ - if (retval) - ll_flag = 0; + if (retval) { + can_busy_loop = false; + busy_flag = 0; + + /* + * only remember a returned + * POLL_BUSY_LOOP if we asked for it + */ + } else if (busy_flag & mask) + can_busy_loop = true; + } } if (res_in) @@ -498,8 +505,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) break; } - /* only if on, have sockets with POLL_LL and not out of time */ - if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time)) + /* only if found POLL_BUSY_LOOP sockets && not out of time */ + if (!need_resched() && can_busy_loop && + busy_loop_range(busy_start, busy_end)) continue; /* @@ -734,7 +742,8 @@ struct poll_list { * if pwait->_qproc is non-NULL. */ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait, - bool *can_ll, unsigned int ll_flag) + bool *can_busy_poll, + unsigned int busy_flag) { unsigned int mask; int fd; @@ -748,10 +757,10 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait, mask = DEFAULT_POLLMASK; if (f.file->f_op && f.file->f_op->poll) { pwait->_key = pollfd->events|POLLERR|POLLHUP; - pwait->_key |= ll_flag; + pwait->_key |= busy_flag; mask = f.file->f_op->poll(f.file, pwait); - if (mask & POLL_LL) - *can_ll = true; + if (mask & busy_flag) + *can_busy_poll = true; } /* Mask out unneeded events. */ mask &= pollfd->events | POLLERR | POLLHUP; @@ -770,9 +779,10 @@ static int do_poll(unsigned int nfds, struct poll_list *list, ktime_t expire, *to = NULL; int timed_out = 0, count = 0; unsigned long slack = 0; - unsigned int ll_flag = ll_get_flag(); - u64 ll_start = ll_start_time(ll_flag); - u64 ll_time = ll_run_time(); + unsigned int busy_flag = net_busy_loop_on() ? 
POLL_BUSY_LOOP : 0; + u64 busy_start = busy_loop_start_time(busy_flag); + u64 busy_end = busy_loop_end_time(); + /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { @@ -785,7 +795,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, for (;;) { struct poll_list *walk; - bool can_ll = false; + bool can_busy_loop = false; for (walk = list; walk != NULL; walk = walk->next) { struct pollfd * pfd, * pfd_end; @@ -800,10 +810,13 @@ static int do_poll(unsigned int nfds, struct poll_list *list, * this. They'll get immediately deregistered * when we break out and return. */ - if (do_pollfd(pfd, pt, &can_ll, ll_flag)) { + if (do_pollfd(pfd, pt, &can_busy_loop, + busy_flag)) { count++; pt->_qproc = NULL; - ll_flag = 0; + /* found something, stop busy polling */ + busy_flag = 0; + can_busy_loop = false; } } } @@ -820,8 +833,9 @@ static int do_poll(unsigned int nfds, struct poll_list *list, if (count || timed_out) break; - /* only if on, have sockets with POLL_LL and not out of time */ - if (ll_flag && can_ll && can_poll_ll(ll_start, ll_time)) + /* only if found POLL_BUSY_LOOP sockets && not out of time */ + if (!need_resched() && can_busy_loop && + busy_loop_range(busy_start, busy_end)) continue; /* diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index 0d620ba19bc5..f14dd88dafc8 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -37,9 +37,9 @@ extern unsigned int sysctl_net_ll_poll __read_mostly; #define LL_FLUSH_FAILED -1 #define LL_FLUSH_BUSY -2 -static inline unsigned int ll_get_flag(void) +static inline bool net_busy_loop_on(void) { - return sysctl_net_ll_poll ? POLL_LL : 0; + return sysctl_net_ll_poll; } /* a wrapper to make debug_smp_processor_id() happy @@ -47,7 +47,7 @@ static inline unsigned int ll_get_flag(void) * we only care that the average is bounded */ #ifdef CONFIG_DEBUG_PREEMPT -static inline u64 ll_sched_clock(void) +static inline u64 busy_loop_sched_clock(void) { u64 rc; @@ -58,7 +58,7 @@ static inline u64 ll_sched_clock(void) return rc; } #else /* CONFIG_DEBUG_PREEMPT */ -static inline u64 ll_sched_clock(void) +static inline u64 busy_loop_sched_clock(void) { return sched_clock(); } @@ -67,7 +67,7 @@ static inline u64 ll_sched_clock(void) /* we don't mind a ~2.5% imprecision so <<10 instead of *1000 * sk->sk_ll_usec is a u_int so this can't overflow */ -static inline u64 ll_sk_run_time(struct sock *sk) +static inline u64 sk_busy_loop_end_time(struct sock *sk) { return (u64)ACCESS_ONCE(sk->sk_ll_usec) << 10; } @@ -75,27 +75,29 @@ static inline u64 ll_sk_run_time(struct sock *sk) /* in poll/select we use the global sysctl_net_ll_poll value * only call sched_clock() if enabled */ -static inline u64 ll_run_time(void) +static inline u64 busy_loop_end_time(void) { return (u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10; } -/* if flag is not set we don't need to know the time */ -static inline u64 ll_start_time(unsigned int flag) +/* if flag is not set we don't need to know the time + * so we want to avoid a potentially expensive sched_clock() + */ +static inline u64 busy_loop_start_time(unsigned int flag) { - return flag ? ll_sched_clock() : 0; + return flag ? busy_loop_sched_clock() : 0; } -static inline bool sk_valid_ll(struct sock *sk) +static inline bool sk_can_busy_loop(struct sock *sk) { return sk->sk_ll_usec && sk->sk_napi_id && !need_resched() && !signal_pending(current); } /* careful! 
time_in_range64 will evaluate now twice */ -static inline bool can_poll_ll(u64 start_time, u64 run_time) +static inline bool busy_loop_range(u64 start_time, u64 run_time) { - u64 now = ll_sched_clock(); + u64 now = busy_loop_sched_clock(); return time_in_range64(now, start_time, start_time + run_time); } @@ -103,10 +105,10 @@ static inline bool can_poll_ll(u64 start_time, u64 run_time) /* when used in sock_poll() nonblock is known at compile time to be true * so the loop and end_time will be optimized out */ -static inline bool sk_poll_ll(struct sock *sk, int nonblock) +static inline bool sk_busy_loop(struct sock *sk, int nonblock) { - u64 start_time = ll_start_time(!nonblock); - u64 run_time = ll_sk_run_time(sk); + u64 start_time = busy_loop_start_time(!nonblock); + u64 end_time = sk_busy_loop_end_time(sk); const struct net_device_ops *ops; struct napi_struct *napi; int rc = false; @@ -137,7 +139,7 @@ static inline bool sk_poll_ll(struct sock *sk, int nonblock) LINUX_MIB_LOWLATENCYRXPACKETS, rc); } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && - can_poll_ll(start_time, run_time)); + busy_loop_range(start_time, end_time)); rc = !skb_queue_empty(&sk->sk_receive_queue); out: @@ -158,27 +160,27 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) } #else /* CONFIG_NET_LL_RX_POLL */ -static inline unsigned long ll_get_flag(void) +static inline unsigned long net_busy_loop_on(void) { return 0; } -static inline u64 ll_start_time(unsigned int flag) +static inline u64 busy_loop_start_time(unsigned int flag) { return 0; } -static inline u64 ll_run_time(void) +static inline u64 busy_loop_end_time(void) { return 0; } -static inline bool sk_valid_ll(struct sock *sk) +static inline bool sk_can_busy_loop(struct sock *sk) { return false; } -static inline bool sk_poll_ll(struct sock *sk, int nonblock) +static inline bool sk_busy_poll(struct sock *sk, int nonblock) { return false; } @@ -191,7 +193,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) { } -static inline bool can_poll_ll(u64 start_time, u64 run_time) +static inline bool busy_loop_range(u64 start_time, u64 run_time) { return false; } diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h index 4aee586979ca..a9694982689f 100644 --- a/include/uapi/asm-generic/poll.h +++ b/include/uapi/asm-generic/poll.h @@ -30,7 +30,7 @@ #define POLLFREE 0x4000 /* currently only for epoll */ -#define POLL_LL 0x8000 +#define POLL_BUSY_LOOP 0x8000 struct pollfd { int fd; diff --git a/net/core/datagram.c b/net/core/datagram.c index 9cbaba98ce4c..6e9ab31e457e 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -208,7 +208,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, } spin_unlock_irqrestore(&queue->lock, cpu_flags); - if (sk_valid_ll(sk) && sk_poll_ll(sk, flags & MSG_DONTWAIT)) + if (sk_can_busy_loop(sk) && + sk_busy_loop(sk, flags & MSG_DONTWAIT)) continue; /* User doesn't want to wait */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 46ed9afd1f5e..15cbfa94bd8e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1554,9 +1554,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct sk_buff *skb; u32 urg_hole = 0; - if (sk_valid_ll(sk) && skb_queue_empty(&sk->sk_receive_queue) - && (sk->sk_state == TCP_ESTABLISHED)) - sk_poll_ll(sk, nonblock); + if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && + (sk->sk_state == TCP_ESTABLISHED)) + sk_busy_loop(sk, nonblock); lock_sock(sk); diff --git 
a/net/socket.c b/net/socket.c index 4da14cbd49b6..45afa648364a 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1148,7 +1148,7 @@ EXPORT_SYMBOL(sock_create_lite); /* No kernel lock held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) { - unsigned int ll_flag = 0; + unsigned int busy_flag = 0; struct socket *sock; /* @@ -1156,16 +1156,16 @@ static unsigned int sock_poll(struct file *file, poll_table *wait) */ sock = file->private_data; - if (sk_valid_ll(sock->sk)) { + if (sk_can_busy_loop(sock->sk)) { /* this socket can poll_ll so tell the system call */ - ll_flag = POLL_LL; + busy_flag = POLL_BUSY_LOOP; /* once, only if requested by syscall */ - if (wait && (wait->_key & POLL_LL)) - sk_poll_ll(sock->sk, 1); + if (wait && (wait->_key & POLL_BUSY_LOOP)) + sk_busy_loop(sock->sk, 1); } - return ll_flag | sock->ops->poll(file, sock, wait); + return busy_flag | sock->ops->poll(file, sock, wait); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) -- cgit v1.2.3 From 76b1e9b9813e412bde7446525f6c299803713545 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Tue, 9 Jul 2013 13:09:21 +0300 Subject: net/fs: change busy poll time accounting Suggested by Linus: Changed time accounting for busy-poll: - Make it microsecond based. - Use unsigned longs. - Revert back to use time_after instead of time_in_range. Reorder poll/select busy loop conditions: - Clear busy_flag after one time we can't busy-poll. - Only init busy_end if we actually are going to busy-poll. Added one more missing need_resched() test. Signed-off-by: Eliezer Tamir Signed-off-by: David S. Miller --- fs/select.c | 31 ++++++++++++++++++----------- include/net/ll_poll.h | 55 +++++++++++++++++---------------------------------- 2 files changed, 38 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/fs/select.c b/fs/select.c index 25cac5faf6d6..50a804b6839f 100644 --- a/fs/select.c +++ b/fs/select.c @@ -403,8 +403,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) int retval, i, timed_out = 0; unsigned long slack = 0; unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; - u64 busy_start = busy_loop_start_time(busy_flag); - u64 busy_end = busy_loop_end_time(); + unsigned long busy_end = 0; rcu_read_lock(); retval = max_select_fd(n, fds); @@ -506,9 +505,15 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) } /* only if found POLL_BUSY_LOOP sockets && not out of time */ - if (!need_resched() && can_busy_loop && - busy_loop_range(busy_start, busy_end)) - continue; + if (can_busy_loop && !need_resched()) { + if (!busy_end) { + busy_end = busy_loop_end_time(); + continue; + } + if (!busy_loop_timeout(busy_end)) + continue; + } + busy_flag = 0; /* * If this is the first loop and we have a timeout @@ -780,9 +785,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, int timed_out = 0, count = 0; unsigned long slack = 0; unsigned int busy_flag = net_busy_loop_on() ? 
POLL_BUSY_LOOP : 0; - u64 busy_start = busy_loop_start_time(busy_flag); - u64 busy_end = busy_loop_end_time(); - + unsigned long busy_end = 0; /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { @@ -834,9 +837,15 @@ static int do_poll(unsigned int nfds, struct poll_list *list, break; /* only if found POLL_BUSY_LOOP sockets && not out of time */ - if (!need_resched() && can_busy_loop && - busy_loop_range(busy_start, busy_end)) - continue; + if (can_busy_loop && !need_resched()) { + if (!busy_end) { + busy_end = busy_loop_end_time(); + continue; + } + if (!busy_loop_timeout(busy_end)) + continue; + } + busy_flag = 0; /* * If this is the first loop and we have a timeout diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h index f14dd88dafc8..76f034087743 100644 --- a/include/net/ll_poll.h +++ b/include/net/ll_poll.h @@ -47,7 +47,7 @@ static inline bool net_busy_loop_on(void) * we only care that the average is bounded */ #ifdef CONFIG_DEBUG_PREEMPT -static inline u64 busy_loop_sched_clock(void) +static inline u64 busy_loop_us_clock(void) { u64 rc; @@ -55,37 +55,24 @@ static inline u64 busy_loop_sched_clock(void) rc = sched_clock(); preempt_enable_no_resched_notrace(); - return rc; + return rc >> 10; } #else /* CONFIG_DEBUG_PREEMPT */ -static inline u64 busy_loop_sched_clock(void) +static inline u64 busy_loop_us_clock(void) { - return sched_clock(); + return sched_clock() >> 10; } #endif /* CONFIG_DEBUG_PREEMPT */ -/* we don't mind a ~2.5% imprecision so <<10 instead of *1000 - * sk->sk_ll_usec is a u_int so this can't overflow - */ -static inline u64 sk_busy_loop_end_time(struct sock *sk) +static inline unsigned long sk_busy_loop_end_time(struct sock *sk) { - return (u64)ACCESS_ONCE(sk->sk_ll_usec) << 10; + return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec); } -/* in poll/select we use the global sysctl_net_ll_poll value - * only call sched_clock() if enabled - */ -static inline u64 busy_loop_end_time(void) -{ - return (u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10; -} - -/* if flag is not set we don't need to know the time - * so we want to avoid a potentially expensive sched_clock() - */ -static inline u64 busy_loop_start_time(unsigned int flag) +/* in poll/select we use the global sysctl_net_ll_poll value */ +static inline unsigned long busy_loop_end_time(void) { - return flag ? busy_loop_sched_clock() : 0; + return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll); } static inline bool sk_can_busy_loop(struct sock *sk) @@ -94,12 +81,12 @@ static inline bool sk_can_busy_loop(struct sock *sk) !need_resched() && !signal_pending(current); } -/* careful! time_in_range64 will evaluate now twice */ -static inline bool busy_loop_range(u64 start_time, u64 run_time) + +static inline bool busy_loop_timeout(unsigned long end_time) { - u64 now = busy_loop_sched_clock(); + unsigned long now = busy_loop_us_clock(); - return time_in_range64(now, start_time, start_time + run_time); + return time_after(now, end_time); } /* when used in sock_poll() nonblock is known at compile time to be true @@ -107,8 +94,7 @@ static inline bool busy_loop_range(u64 start_time, u64 run_time) */ static inline bool sk_busy_loop(struct sock *sk, int nonblock) { - u64 start_time = busy_loop_start_time(!nonblock); - u64 end_time = sk_busy_loop_end_time(sk); + unsigned long end_time = !nonblock ? 
sk_busy_loop_end_time(sk) : 0; const struct net_device_ops *ops; struct napi_struct *napi; int rc = false; @@ -139,7 +125,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock) LINUX_MIB_LOWLATENCYRXPACKETS, rc); } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && - busy_loop_range(start_time, end_time)); + !need_resched() && !busy_loop_timeout(end_time)); rc = !skb_queue_empty(&sk->sk_receive_queue); out: @@ -165,12 +151,7 @@ static inline unsigned long net_busy_loop_on(void) return 0; } -static inline u64 busy_loop_start_time(unsigned int flag) -{ - return 0; -} - -static inline u64 busy_loop_end_time(void) +static inline unsigned long busy_loop_end_time(void) { return 0; } @@ -193,9 +174,9 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) { } -static inline bool busy_loop_range(u64 start_time, u64 run_time) +static inline bool busy_loop_timeout(unsigned long end_time) { - return false; + return true; } #endif /* CONFIG_NET_LL_RX_POLL */ -- cgit v1.2.3 From cc229884d3f77ec3b1240e467e0236c3e0647c0c Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 9 Jul 2013 13:19:18 +0300 Subject: virtio: support unlocked queue poll This adds a way to check ring empty state after enable_cb outside any locks. Will be used by virtio_net. Note: there's room for more optimization: caller is likely to have a memory barrier already, which means we might be able to get rid of a barrier here. Deferring this optimization until we do some benchmarking. Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/virtio/virtio_ring.c | 56 ++++++++++++++++++++++++++++++++++---------- include/linux/virtio.h | 4 ++++ 2 files changed, 48 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5217baf5528c..37d58f84dc50 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -607,19 +607,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq) EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** - * virtqueue_enable_cb - restart callbacks after disable_cb. + * virtqueue_enable_cb_prepare - restart callbacks after disable_cb * @vq: the struct virtqueue we're talking about. * - * This re-enables callbacks; it returns "false" if there are pending - * buffers in the queue, to detect a possible race between the driver - * checking for more work, and enabling callbacks. + * This re-enables callbacks; it returns current queue state + * in an opaque unsigned value. This value should be later tested by + * virtqueue_poll, to detect a possible race between the driver checking for + * more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ -bool virtqueue_enable_cb(struct virtqueue *_vq) +unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); + u16 last_used_idx; START_USE(vq); @@ -629,15 +631,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq) * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; - vring_used_event(&vq->vring) = vq->last_used_idx; + vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; + END_USE(vq); + return last_used_idx; +} +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); + +/** + * virtqueue_poll - query pending used buffers + * @vq: the struct virtqueue we're talking about. 
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). + * + * Returns "true" if there are pending used buffers in the queue. + * + * This does not need to be serialized. + */ +bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + virtio_mb(vq->weak_barriers); - if (unlikely(more_used(vq))) { - END_USE(vq); - return false; - } + return (u16)last_used_idx != vq->vring.used->idx; +} +EXPORT_SYMBOL_GPL(virtqueue_poll); - END_USE(vq); - return true; +/** + * virtqueue_enable_cb - restart callbacks after disable_cb. + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks; it returns "false" if there are pending + * buffers in the queue, to detect a possible race between the driver + * checking for more work, and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ +bool virtqueue_enable_cb(struct virtqueue *_vq) +{ + unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); + return !virtqueue_poll(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 9ff8645b7e0b..72398eea6e86 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -70,6 +70,10 @@ void virtqueue_disable_cb(struct virtqueue *vq); bool virtqueue_enable_cb(struct virtqueue *vq); +unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq); + +bool virtqueue_poll(struct virtqueue *vq, unsigned); + bool virtqueue_enable_cb_delayed(struct virtqueue *vq); void *virtqueue_detach_unused_buf(struct virtqueue *vq); -- cgit v1.2.3
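The two fs/select.c hunks in the "net/fs: change busy poll time accounting" patch above reorder the busy-poll check so that busy_loop_end_time() is read only once we actually decide to spin, and busy_flag is cleared the first time spinning is no longer possible. A minimal sketch of that per-iteration decision, using the helper names introduced by the patch; the wrapper function itself is hypothetical and only illustrates the control flow:

/*
 * Illustrative only: mirrors the logic of the do_select()/do_poll()
 * hunks above.  busy_end starts at 0; the caller clears its local
 * busy_flag (and stops passing POLL_BUSY_LOOP down to ->poll) as soon
 * as this returns false.
 */
static bool keep_busy_polling(bool can_busy_loop, unsigned long *busy_end)
{
	if (!can_busy_loop || need_resched())
		return false;		/* give up, fall through to sleep */

	if (!*busy_end) {
		/* first busy iteration: only now pay for the clock read */
		*busy_end = busy_loop_end_time();
		return true;
	}

	/* keep spinning until the per-call budget expires */
	return !busy_loop_timeout(*busy_end);
}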
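The unlocked queue poll added in the final patch is meant to be split around a driver lock: the ring state is snapshotted while callbacks are re-enabled under the lock, and the cheap empty-check can then run lockless (for example from a busy-poll loop, as the commit message hints for virtio_net). A sketch of the intended calling pattern, assuming a driver-private spinlock; everything except the virtqueue_* calls is a placeholder:

/*
 * Hypothetical caller of the new API.  Returns true if more used
 * buffers arrived after callbacks were re-enabled, in which case the
 * caller would typically disable callbacks again and keep processing.
 */
static bool vq_drain_then_poll(struct virtqueue *vq, spinlock_t *lock)
{
	unsigned last_used_idx;
	unsigned int len;
	void *buf;

	spin_lock(lock);
	virtqueue_disable_cb(vq);
	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
		;	/* consume completed buffers */
	last_used_idx = virtqueue_enable_cb_prepare(vq);
	spin_unlock(lock);

	/* no lock needed: only compares the snapshot with the used index */
	return virtqueue_poll(vq, last_used_idx);
}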